src/hotspot/share/runtime/jniHandles.cpp (original version)

  41   return _global_handles;
  42 }
  43 
  44 OopStorage* JNIHandles::weak_global_handles() {
  45   return _weak_global_handles;
  46 }
  47 
  48 // Serviceability agent support.
  49 OopStorage* JNIHandles::_global_handles = NULL;
  50 OopStorage* JNIHandles::_weak_global_handles = NULL;
  51 
  52 void jni_handles_init() {
  53   JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global");
  54   JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak");
  55 }
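
The two OopStorage instances created above back make_global and
make_weak_global below. A minimal sketch of the allocate/release
pairing; obj stands for an arbitrary oop, and OopStorage::release as
the counterpart is an assumption here, since the release path does not
appear in this excerpt:

    // Sketch only: lifecycle of a slot from the "JNI Global" storage.
    oop* slot = JNIHandles::global_handles()->allocate();  // NULL on failure
    if (slot != NULL) {
      NativeAccess<>::oop_store(slot, obj);          // publish the oop
      // ... later, e.g. on DeleteGlobalRef:
      NativeAccess<>::oop_store(slot, (oop)NULL);    // clear the slot
      JNIHandles::global_handles()->release(slot);   // assumed counterpart
    }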
  56 
  57 jobject JNIHandles::make_local(oop obj) {
  58   return make_local(Thread::current(), obj);
  59 }
  60 
  61 
  62 jobject JNIHandles::make_local(Thread* thread, oop obj) {
  63   if (obj == NULL) {
  64     return NULL;                // ignore null handles
  65   } else {
  66     assert(oopDesc::is_oop(obj), "not an oop");
  67     assert(thread->is_Java_thread(), "not a Java thread");
  68     assert(!current_thread_in_native(), "must not be in native");
  69     return thread->active_handles()->allocate_handle(obj);
  70   }
  71 }
  72 
  73 static void report_handle_allocation_failure(AllocFailType alloc_failmode,
  74                                              const char* handle_kind) {
  75   if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
  76     // Fake size value, since we don't know the min allocation size here.
  77     vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
  78                           "Cannot create %s JNI handle", handle_kind);
  79   } else {
  80     assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  81   }
  82 }
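
The helper above encodes the two AllocFailType contracts: EXIT_OOM
aborts the VM inside the allocation path, while RETURN_NULL makes the
failure visible to the caller. A caller-side sketch (h is an arbitrary
Handle):

    // Sketch: with RETURN_NULL the caller must handle a NULL result;
    // under EXIT_OOM it would never observe one.
    jobject ref = JNIHandles::make_global(h, AllocFailStrategy::RETURN_NULL);
    if (ref == NULL) {
      // e.g. post an OutOfMemoryError to the Java caller instead of
      // terminating the whole VM
    }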
  83 
  84 jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
  85   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  86   assert(!current_thread_in_native(), "must not be in native");
  87   jobject res = NULL;
  88   if (!obj.is_null()) {
  89     // ignore null handles
  90     assert(oopDesc::is_oop(obj()), "not an oop");
  91     oop* ptr = global_handles()->allocate();
  92     // Return NULL on allocation failure.
  93     if (ptr != NULL) {
  94       assert(*ptr == NULL, "invariant");
  95       NativeAccess<>::oop_store(ptr, obj());
  96       res = reinterpret_cast<jobject>(ptr);
  97     } else {
  98       report_handle_allocation_failure(alloc_failmode, "global");
  99     }
 100   }
 101 
 102   return res;
 103 }
 104 
 105 
 106 jobject JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
 107   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
 108   assert(!current_thread_in_native(), "must not be in native");
 109   jobject res = NULL;
 110   if (!obj.is_null()) {
 111     // ignore null handles
 112     assert(oopDesc::is_oop(obj()), "not an oop");
 113     oop* ptr = weak_global_handles()->allocate();
 114     // Return NULL on allocation failure.
 115     if (ptr != NULL) {
 116       assert(*ptr == NULL, "invariant");
 117       NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
 118       char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
 119       res = reinterpret_cast<jobject>(tptr);
 120     } else {
 121       report_handle_allocation_failure(alloc_failmode, "weak global");
 122     }
 123   }
 124   return res;
 125 }
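
Note that the jobject returned by make_weak_global is the slot address
plus weak_tag_value, so resolution must strip the tag before
dereferencing. A minimal sketch, assuming weak_tag_value is the low
bit (the name resolve_weak_sketch is hypothetical):

    // Sketch: recover the oop* from a tagged weak jobject.
    inline oop resolve_weak_sketch(jobject handle) {
      uintptr_t v = reinterpret_cast<uintptr_t>(handle);
      assert((v & weak_tag_value) != 0, "expected a weak handle");
      oop* ptr = reinterpret_cast<oop*>(v - weak_tag_value);   // strip tag
      return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(ptr);  // phantom load
    }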


 326 
 327 // There is a freelist of handles running through the JNIHandleBlock
 328 // with a tagged next pointer, distinguishing these next pointers from
 329 // oops. The freelist handling currently relies on the size of oops
 330 // being the same as a native pointer. If this ever changes, then
 331 // this freelist handling must change too.
 332 STATIC_ASSERT(sizeof(oop) == sizeof(uintptr_t));
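
The tagging helpers used below (tag_free_list, untag_free_list,
is_tagged_free_list) are not shown in this excerpt; one plausible
shape, assuming the low bit can serve as the tag because slots
otherwise hold pointer-aligned oop values:

    // Sketch: low-bit tagging distinguishes free-list links from oops
    // stored in the same slots.
    static inline bool is_tagged_free_list(uintptr_t value) {
      return (value & 1u) != 0;         // a valid oop never sets the low bit
    }
    static inline uintptr_t tag_free_list(uintptr_t value) {
      return value | 1u;                // mark slot content as a link
    }
    static inline uintptr_t untag_free_list(uintptr_t value) {
      return value & ~(uintptr_t)1u;    // recover the plain address
    }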
 333 
 334 #ifdef ASSERT
 335 void JNIHandleBlock::zap() {
 336   // Zap block values
 337   _top = 0;
 338   for (int index = 0; index < block_size_in_oops; index++) {
 339     // NOT using Access here; just bare clobbering to NULL, since the
 340     // block no longer contains valid oops.
 341     _handles[index] = 0;
 342   }
 343 }
 344 #endif // ASSERT
 345 
 346 JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread)  {
 347   assert(thread == NULL || thread == Thread::current(), "sanity check");
 348   JNIHandleBlock* block;
 349   // Check the thread-local free list for a block so we don't
 350   // have to acquire a mutex.
 351   if (thread != NULL && thread->free_handle_block() != NULL) {
 352     block = thread->free_handle_block();
 353     thread->set_free_handle_block(block->_next);
 354   }
 355   else {
 356     // locking with safepoint checking introduces a potential deadlock:
 357     // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
 358     // - another would hold Threads_lock (jni_AttachCurrentThread) and then
 359     //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
 360     MutexLocker ml(JNIHandleBlockFreeList_lock,
 361                    Mutex::_no_safepoint_check_flag);
 362     if (_block_free_list == NULL) {
 363       // Allocate new block
 364       block = new JNIHandleBlock();
 365       _blocks_allocated++;
 366       block->zap();
 367       #ifndef PRODUCT
 368       // Link new block to list of all allocated blocks
 369       block->_block_list_link = _block_list;
 370       _block_list = block;
 371       #endif
 372     } else {
 373       // Get block from free list
 374       block = _block_free_list;
 375       _block_free_list = _block_free_list->_next;
 376     }
 377   }
 378   block->_top = 0;
 379   block->_next = NULL;
 380   block->_pop_frame_link = NULL;
 381   block->_planned_capacity = block_size_in_oops;
 382   // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
 383   debug_only(block->_last = NULL);
 384   debug_only(block->_free_list = NULL);


 444       for (int index = 0; index < current->_top; index++) {
 445         uintptr_t* addr = &(current->_handles)[index];
 446         uintptr_t value = *addr;
 447         // traverse heap pointers only, not deleted handles or free list
 448         // pointers
 449         if (value != 0 && !is_tagged_free_list(value)) {
 450           oop* root = (oop*)addr;
 451           f->do_oop(root);
 452         }
 453       }
 454       // the next handle block is valid only if current block is full
 455       if (current->_top < block_size_in_oops) {
 456         break;
 457       }
 458     }
 459     current_chain = current_chain->pop_frame_link();
 460   }
 461 }
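
The traversal above hands each live slot to an OopClosure via do_oop.
A hedged usage sketch (the closure name is hypothetical):

    // Sketch: a closure that merely counts the live handles visited.
    class CountHandlesClosure : public OopClosure {
     public:
      int _count = 0;
      virtual void do_oop(oop* p)       { _count++; }
      virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
    };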
 462 
 463 
 464 jobject JNIHandleBlock::allocate_handle(oop obj) {
 465   assert(Universe::heap()->is_in(obj), "sanity check");
 466   if (_top == 0) {
 467     // This is the first allocation or the initial block got zapped when
 468     // entering a native function. If we have any following blocks, they are
 469     // not valid anymore.
 470     for (JNIHandleBlock* current = _next; current != NULL;
 471          current = current->_next) {
 472       assert(current->_last == NULL, "only first block should have _last set");
 473       assert(current->_free_list == NULL,
 474              "only first block should have _free_list set");
 475       if (current->_top == 0) {
 476         // Once we find a cleared block, all following blocks are already cleared.
 477 #ifdef ASSERT
 478         for (current = current->_next; current != NULL; current = current->_next) {
 479           assert(current->_top == 0, "trailing blocks must already be cleared");
 480         }
 481 #endif
 482         break;
 483       }
 484       current->_top = 0;


 492   }
 493 
 494   // Try last block
 495   if (_last->_top < block_size_in_oops) {
 496     oop* handle = (oop*)&(_last->_handles)[_last->_top++];
 497     NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
 498     return (jobject) handle;
 499   }
 500 
 501   // Try free list
 502   if (_free_list != NULL) {
 503     oop* handle = (oop*)_free_list;
 504     _free_list = (uintptr_t*) untag_free_list(*_free_list);
 505     NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
 506     return (jobject) handle;
 507   }
 508   // Check if an unused block follows the last block
 509   if (_last->_next != NULL) {
 510     // update last and retry
 511     _last = _last->_next;
 512     return allocate_handle(obj);
 513   }
 514 
 515   // No space available, we have to rebuild free list or expand
 516   if (_allocate_before_rebuild == 0) {
 517     rebuild_free_list();        // updates _allocate_before_rebuild counter
 518   } else {
 519     // Append new block
 520     Thread* thread = Thread::current();
 521     Handle obj_handle(thread, obj);
 522     // This can block, so we need to preserve obj across call.
 523     _last->_next = JNIHandleBlock::allocate_block(thread);
 524     _last = _last->_next;
 525     _allocate_before_rebuild--;
 526     obj = obj_handle();
 527   }
 528   return allocate_handle(obj);  // retry
 529 }
 530 
 531 void JNIHandleBlock::rebuild_free_list() {
 532   assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
 533   int free = 0;
 534   int blocks = 0;
 535   for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
 536     for (int index = 0; index < current->_top; index++) {
 537       uintptr_t* handle = &(current->_handles)[index];
 538       if (*handle == 0) {
 539         // this handle was cleared out by a delete call, reuse it
 540         *handle = _free_list == NULL ? 0 : tag_free_list((uintptr_t)_free_list);
 541         _free_list = handle;
 542         free++;
 543       }
 544     }
 545     // we should not rebuild the free list if there are unused handles at the end
 546     assert(current->_top == block_size_in_oops, "just checking");
 547     blocks++;
 548   }

src/hotspot/share/runtime/jniHandles.cpp (updated version)

  41   return _global_handles;
  42 }
  43 
  44 OopStorage* JNIHandles::weak_global_handles() {
  45   return _weak_global_handles;
  46 }
  47 
  48 // Serviceability agent support.
  49 OopStorage* JNIHandles::_global_handles = NULL;
  50 OopStorage* JNIHandles::_weak_global_handles = NULL;
  51 
  52 void jni_handles_init() {
  53   JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global");
  54   JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak");
  55 }
  56 
  57 jobject JNIHandles::make_local(oop obj) {
  58   return make_local(Thread::current(), obj);
  59 }
  60 
  61 // Used by NewLocalRef, which requires NULL on out-of-memory
  62 jobject JNIHandles::make_local(Thread* thread, oop obj, AllocFailType alloc_failmode) {
  63   if (obj == NULL) {
  64     return NULL;                // ignore null handles
  65   } else {
  66     assert(oopDesc::is_oop(obj), "not an oop");
  67     assert(thread->is_Java_thread(), "not a Java thread");
  68     assert(!current_thread_in_native(), "must not be in native");
  69     return thread->active_handles()->allocate_handle(obj, alloc_failmode);
  70   }
  71 }
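
With the new alloc_failmode parameter, a JNI entry point can surface
allocation failure instead of aborting. A caller-side sketch (the
actual NewLocalRef plumbing is not shown in this excerpt):

    // Sketch: NewLocalRef-style caller using the RETURN_NULL contract.
    jobject local = JNIHandles::make_local(thread, obj,
                                           AllocFailStrategy::RETURN_NULL);
    if (local == NULL) {
      // handle-block expansion failed; the JNI spec permits returning
      // NULL to the native caller here
    }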
  72 
  73 static void report_handle_allocation_failure(AllocFailType alloc_failmode,
  74                                              const char* handle_kind) {
  75   if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
  76     // Fake size value, since we don't know the min allocation size here.
  77     vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
  78                           "Cannot create %s JNI handle", handle_kind);
  79   } else {
  80     assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  81   }
  82 }
  83 
  84 jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
  85   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  86   assert(!current_thread_in_native(), "must not be in native");
  87   jobject res = NULL;
  88   if (!obj.is_null()) {
  89     // ignore null handles
  90     assert(oopDesc::is_oop(obj()), "not an oop");
  91     oop* ptr = global_handles()->allocate();
  92     // Return NULL on allocation failure.
  93     if (ptr != NULL) {
  94       assert(*ptr == NULL, "invariant");
  95       NativeAccess<>::oop_store(ptr, obj());
  96       res = reinterpret_cast<jobject>(ptr);
  97     } else {
  98       report_handle_allocation_failure(alloc_failmode, "global");
  99     }
 100   }
 101 
 102   return res;
 103 }
 104 
 105 jobject JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
 106   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
 107   assert(!current_thread_in_native(), "must not be in native");
 108   jobject res = NULL;
 109   if (!obj.is_null()) {
 110     // ignore null handles
 111     assert(oopDesc::is_oop(obj()), "not an oop");
 112     oop* ptr = weak_global_handles()->allocate();
 113     // Return NULL on allocation failure.
 114     if (ptr != NULL) {
 115       assert(*ptr == NULL, "invariant");
 116       NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
 117       char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
 118       res = reinterpret_cast<jobject>(tptr);
 119     } else {
 120       report_handle_allocation_failure(alloc_failmode, "weak global");
 121     }
 122   }
 123   return res;
 124 }


 325 
 326 // There is a freelist of handles running through the JNIHandleBlock
 327 // with a tagged next pointer, distinguishing these next pointers from
 328 // oops. The freelist handling currently relies on the size of oops
 329 // being the same as a native pointer. If this ever changes, then
 330 // this freelist handling must change too.
 331 STATIC_ASSERT(sizeof(oop) == sizeof(uintptr_t));
 332 
 333 #ifdef ASSERT
 334 void JNIHandleBlock::zap() {
 335   // Zap block values
 336   _top = 0;
 337   for (int index = 0; index < block_size_in_oops; index++) {
 338     // NOT using Access here; just bare clobbering to NULL, since the
 339     // block no longer contains valid oops.
 340     _handles[index] = 0;
 341   }
 342 }
 343 #endif // ASSERT
 344 
 345 JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread, AllocFailType alloc_failmode)  {
 346   assert(thread == NULL || thread == Thread::current(), "sanity check");
 347   JNIHandleBlock* block;
 348   // Check the thread-local free list for a block so we don't
 349   // have to acquire a mutex.
 350   if (thread != NULL && thread->free_handle_block() != NULL) {
 351     block = thread->free_handle_block();
 352     thread->set_free_handle_block(block->_next);
 353   }
 354   else {
 355     // locking with safepoint checking introduces a potential deadlock:
 356     // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
 357     // - another would hold Threads_lock (jni_AttachCurrentThread) and then
 358     //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
 359     MutexLocker ml(JNIHandleBlockFreeList_lock,
 360                    Mutex::_no_safepoint_check_flag);
 361     if (_block_free_list == NULL) {
 362       // Allocate new block
 363       if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
 364         block = new (std::nothrow) JNIHandleBlock();
 365         if (block == NULL) {
 366           return NULL;
 367         }
 368       } else {
 369         block = new JNIHandleBlock();
 370       }
 371       _blocks_allocated++;
 372       block->zap();
 373       #ifndef PRODUCT
 374       // Link new block to list of all allocated blocks
 375       block->_block_list_link = _block_list;
 376       _block_list = block;
 377       #endif
 378     } else {
 379       // Get block from free list
 380       block = _block_free_list;
 381       _block_free_list = _block_free_list->_next;
 382     }
 383   }
 384   block->_top = 0;
 385   block->_next = NULL;
 386   block->_pop_frame_link = NULL;
 387   block->_planned_capacity = block_size_in_oops;
 388   // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
 389   debug_only(block->_last = NULL);
 390   debug_only(block->_free_list = NULL);
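
The RETURN_NULL branch above relies on the C++ nothrow allocation
form. In isolation, with a plain type (HotSpot's own operator new
overloads differ in detail, but the added code uses the same nothrow
signature):

    #include <new>   // std::nothrow

    // Sketch: the nothrow form returns a null pointer on exhaustion
    // instead of throwing std::bad_alloc, so the caller can back out.
    static int* try_alloc(size_t n) {
      return new (std::nothrow) int[n];
    }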


 450       for (int index = 0; index < current->_top; index++) {
 451         uintptr_t* addr = &(current->_handles)[index];
 452         uintptr_t value = *addr;
 453         // traverse heap pointers only, not deleted handles or free list
 454         // pointers
 455         if (value != 0 && !is_tagged_free_list(value)) {
 456           oop* root = (oop*)addr;
 457           f->do_oop(root);
 458         }
 459       }
 460       // the next handle block is valid only if current block is full
 461       if (current->_top < block_size_in_oops) {
 462         break;
 463       }
 464     }
 465     current_chain = current_chain->pop_frame_link();
 466   }
 467 }
 468 
 469 
 470 jobject JNIHandleBlock::allocate_handle(oop obj, AllocFailType alloc_failmode) {
 471   assert(Universe::heap()->is_in(obj), "sanity check");
 472   if (_top == 0) {
 473     // This is the first allocation or the initial block got zapped when
 474     // entering a native function. If we have any following blocks, they are
 475     // not valid anymore.
 476     for (JNIHandleBlock* current = _next; current != NULL;
 477          current = current->_next) {
 478       assert(current->_last == NULL, "only first block should have _last set");
 479       assert(current->_free_list == NULL,
 480              "only first block should have _free_list set");
 481       if (current->_top == 0) {
 482         // Once we find a cleared block, all following blocks are already cleared.
 483 #ifdef ASSERT
 484         for (current = current->_next; current != NULL; current = current->_next) {
 485           assert(current->_top == 0, "trailing blocks must already be cleared");
 486         }
 487 #endif
 488         break;
 489       }
 490       current->_top = 0;


 498   }
 499 
 500   // Try last block
 501   if (_last->_top < block_size_in_oops) {
 502     oop* handle = (oop*)&(_last->_handles)[_last->_top++];
 503     NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
 504     return (jobject) handle;
 505   }
 506 
 507   // Try free list
 508   if (_free_list != NULL) {
 509     oop* handle = (oop*)_free_list;
 510     _free_list = (uintptr_t*) untag_free_list(*_free_list);
 511     NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
 512     return (jobject) handle;
 513   }
 514   // Check if an unused block follows the last block
 515   if (_last->_next != NULL) {
 516     // update last and retry
 517     _last = _last->_next;
 518     return allocate_handle(obj, alloc_failmode);
 519   }
 520 
 521   // No space available, we have to rebuild free list or expand
 522   if (_allocate_before_rebuild == 0) {
 523     rebuild_free_list();        // updates _allocate_before_rebuild counter
 524   } else {
 525     // Append new block
 526     Thread* thread = Thread::current();
 527     Handle obj_handle(thread, obj);
 528     // This can block, so we need to preserve obj across call.
 529     _last->_next = JNIHandleBlock::allocate_block(thread, alloc_failmode);
 530     if (_last->_next == NULL) {
 531       return NULL;
 532     }
 533     _last = _last->_next;
 534     _allocate_before_rebuild--;
 535     obj = obj_handle();
 536   }
 537   return allocate_handle(obj, alloc_failmode);  // retry
 538 }
 539 
 540 void JNIHandleBlock::rebuild_free_list() {
 541   assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
 542   int free = 0;
 543   int blocks = 0;
 544   for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
 545     for (int index = 0; index < current->_top; index++) {
 546       uintptr_t* handle = &(current->_handles)[index];
 547       if (*handle == 0) {
 548         // this handle was cleared out by a delete call, reuse it
 549         *handle = _free_list == NULL ? 0 : tag_free_list((uintptr_t)_free_list);
 550         _free_list = handle;
 551         free++;
 552       }
 553     }
 554     // we should not rebuild the free list if there are unused handles at the end
 555     assert(current->_top == block_size_in_oops, "just checking");
 556     blocks++;
 557   }
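
The rebuilt list threads deleted slots together newest-first: each
link holds the tagged address of the previously found free slot, and a
zero word terminates the chain. A sketch, placed conceptually at the
end of rebuild_free_list, that walks and checks that invariant:

    // Sketch: every node holds either the terminating zero or a tagged
    // link to the next free slot.
    for (uintptr_t* cursor = _free_list; cursor != NULL; ) {
      uintptr_t next = *cursor;
      assert(next == 0 || is_tagged_free_list(next), "corrupt free list");
      cursor = (next == 0) ? NULL : (uintptr_t*)untag_free_list(next);
    }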

