
src/hotspot/share/runtime/jniHandles.cpp

Old version:

  41   return _global_handles;
  42 }
  43 
  44 OopStorage* JNIHandles::weak_global_handles() {
  45   return _weak_global_handles;
  46 }
  47 
  48 // Serviceability agent support.
  49 OopStorage* JNIHandles::_global_handles = NULL;
  50 OopStorage* JNIHandles::_weak_global_handles = NULL;
  51 
  52 void jni_handles_init() {
  53   JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global");
  54   JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak");
  55 }
  56 
  57 jobject JNIHandles::make_local(oop obj) {
  58   return make_local(Thread::current(), obj);
  59 }
  60 
  61 
  62 jobject JNIHandles::make_local(Thread* thread, oop obj) {
  63   if (obj == NULL) {
  64     return NULL;                // ignore null handles
  65   } else {
  66     assert(oopDesc::is_oop(obj), "not an oop");
  67     assert(thread->is_Java_thread(), "not a Java thread");
  68     assert(!current_thread_in_native(), "must not be in native");
  69     return thread->active_handles()->allocate_handle(obj);
  70   }
  71 }
  72 
  73 static void report_handle_allocation_failure(AllocFailType alloc_failmode,
  74                                              const char* handle_kind) {
  75   if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
  76     // Fake size value, since we don't know the min allocation size here.
  77     vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
  78                           "Cannot create %s JNI handle", handle_kind);
  79   } else {
  80     assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  81   }
  82 }
  83 
  84 jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
  85   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  86   assert(!current_thread_in_native(), "must not be in native");
  87   jobject res = NULL;
  88   if (!obj.is_null()) {
  89     // ignore null handles
  90     assert(oopDesc::is_oop(obj()), "not an oop");
  91     oop* ptr = global_handles()->allocate();
  92     // Return NULL on allocation failure.
  93     if (ptr != NULL) {
  94       assert(*ptr == NULL, "invariant");
  95       NativeAccess<>::oop_store(ptr, obj());
  96       res = reinterpret_cast<jobject>(ptr);
  97     } else {
  98       report_handle_allocation_failure(alloc_failmode, "global");
  99     }
 100   }
 101 
 102   return res;
 103 }
 104 
 105 
 106 jobject JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
 107   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
 108   assert(!current_thread_in_native(), "must not be in native");
 109   jobject res = NULL;
 110   if (!obj.is_null()) {
 111     // ignore null handles
 112     assert(oopDesc::is_oop(obj()), "not an oop");
 113     oop* ptr = weak_global_handles()->allocate();
 114     // Return NULL on allocation failure.
 115     if (ptr != NULL) {
 116       assert(*ptr == NULL, "invariant");
 117       NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
 118       char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
 119       res = reinterpret_cast<jobject>(tptr);
 120     } else {
 121       report_handle_allocation_failure(alloc_failmode, "weak global");
 122     }
 123   }
 124   return res;
 125 }
 126 
 127 // Resolve some erroneous cases to NULL, rather than treating them as
 128 // possibly unchecked errors.  In particular, deleted handles are


 326 
 327 // There is a freelist of handles running through the JNIHandleBlock
 328 // with a tagged next pointer, distinguishing these next pointers from
 329 // oops. The freelist handling currently relies on the size of oops
 330 // being the same as a native pointer. If this ever changes, then
 331 // this freelist handling must change too.
 332 STATIC_ASSERT(sizeof(oop) == sizeof(uintptr_t));
 333 
 334 #ifdef ASSERT
 335 void JNIHandleBlock::zap() {
 336   // Zap block values
 337   _top = 0;
 338   for (int index = 0; index < block_size_in_oops; index++) {
 339     // NOT using Access here; just bare clobbering to NULL, since the
 340     // block no longer contains valid oops.
 341     _handles[index] = 0;
 342   }
 343 }
 344 #endif // ASSERT
 345 
  346 JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread) {
 347   assert(thread == NULL || thread == Thread::current(), "sanity check");
 348   JNIHandleBlock* block;
 349   // Check the thread-local free list for a block so we don't
 350   // have to acquire a mutex.
 351   if (thread != NULL && thread->free_handle_block() != NULL) {
 352     block = thread->free_handle_block();
 353     thread->set_free_handle_block(block->_next);
 354   }
 355   else {
 356     // locking with safepoint checking introduces a potential deadlock:
 357     // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
  358     // - another thread would hold Threads_lock (jni_AttachCurrentThread) and then
 359     //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
 360     MutexLocker ml(JNIHandleBlockFreeList_lock,
 361                    Mutex::_no_safepoint_check_flag);
 362     if (_block_free_list == NULL) {
 363       // Allocate new block
 364       block = new JNIHandleBlock();
 365       _blocks_allocated++;
 366       block->zap();
 367       #ifndef PRODUCT
 368       // Link new block to list of all allocated blocks
 369       block->_block_list_link = _block_list;
 370       _block_list = block;
 371       #endif
 372     } else {
 373       // Get block from free list
 374       block = _block_free_list;
 375       _block_free_list = _block_free_list->_next;
 376     }
 377   }
 378   block->_top = 0;
 379   block->_next = NULL;
 380   block->_pop_frame_link = NULL;
 381   block->_planned_capacity = block_size_in_oops;
 382   // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
 383   debug_only(block->_last = NULL);
 384   debug_only(block->_free_list = NULL);


 444       for (int index = 0; index < current->_top; index++) {
 445         uintptr_t* addr = &(current->_handles)[index];
 446         uintptr_t value = *addr;
 447         // traverse heap pointers only, not deleted handles or free list
 448         // pointers
 449         if (value != 0 && !is_tagged_free_list(value)) {
 450           oop* root = (oop*)addr;
 451           f->do_oop(root);
 452         }
 453       }
 454       // the next handle block is valid only if current block is full
 455       if (current->_top < block_size_in_oops) {
 456         break;
 457       }
 458     }
 459     current_chain = current_chain->pop_frame_link();
 460   }
 461 }
 462 
 463 
 464 jobject JNIHandleBlock::allocate_handle(oop obj) {
 465   assert(Universe::heap()->is_in(obj), "sanity check");
 466   if (_top == 0) {
 467     // This is the first allocation or the initial block got zapped when
 468     // entering a native function. If we have any following blocks they are
 469     // not valid anymore.
 470     for (JNIHandleBlock* current = _next; current != NULL;
 471          current = current->_next) {
 472       assert(current->_last == NULL, "only first block should have _last set");
 473       assert(current->_free_list == NULL,
 474              "only first block should have _free_list set");
 475       if (current->_top == 0) {
  476         // All blocks after the first cleared trailing block are already cleared.
 477 #ifdef ASSERT
 478         for (current = current->_next; current != NULL; current = current->_next) {
 479           assert(current->_top == 0, "trailing blocks must already be cleared");
 480         }
 481 #endif
 482         break;
 483       }
 484       current->_top = 0;


 492   }
 493 
 494   // Try last block
 495   if (_last->_top < block_size_in_oops) {
 496     oop* handle = (oop*)&(_last->_handles)[_last->_top++];
 497     NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
 498     return (jobject) handle;
 499   }
 500 
 501   // Try free list
 502   if (_free_list != NULL) {
 503     oop* handle = (oop*)_free_list;
 504     _free_list = (uintptr_t*) untag_free_list(*_free_list);
 505     NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
 506     return (jobject) handle;
 507   }
  508   // Check if an unused block follows the last one
 509   if (_last->_next != NULL) {
 510     // update last and retry
 511     _last = _last->_next;
 512     return allocate_handle(obj);
 513   }
 514 
 515   // No space available, we have to rebuild free list or expand
 516   if (_allocate_before_rebuild == 0) {
  517     rebuild_free_list();        // updates _allocate_before_rebuild counter
 518   } else {
 519     // Append new block
 520     Thread* thread = Thread::current();
 521     Handle obj_handle(thread, obj);
 522     // This can block, so we need to preserve obj across call.
 523     _last->_next = JNIHandleBlock::allocate_block(thread);
 524     _last = _last->_next;
 525     _allocate_before_rebuild--;
 526     obj = obj_handle();
 527   }
 528   return allocate_handle(obj);  // retry
 529 }
 530 
 531 void JNIHandleBlock::rebuild_free_list() {
 532   assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
 533   int free = 0;
 534   int blocks = 0;
 535   for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
 536     for (int index = 0; index < current->_top; index++) {
 537       uintptr_t* handle = &(current->_handles)[index];
 538       if (*handle == 0) {
 539         // this handle was cleared out by a delete call, reuse it
 540         *handle = _free_list == NULL ? 0 : tag_free_list((uintptr_t)_free_list);
 541         _free_list = handle;
 542         free++;
 543       }
 544     }
  545     // we should not rebuild the free list if there are unused handles at the end
 546     assert(current->_top == block_size_in_oops, "just checking");
 547     blocks++;
 548   }

New version:

  41   return _global_handles;
  42 }
  43 
  44 OopStorage* JNIHandles::weak_global_handles() {
  45   return _weak_global_handles;
  46 }
  47 
  48 // Serviceability agent support.
  49 OopStorage* JNIHandles::_global_handles = NULL;
  50 OopStorage* JNIHandles::_weak_global_handles = NULL;
  51 
  52 void jni_handles_init() {
  53   JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global");
  54   JNIHandles::_weak_global_handles = OopStorageSet::create_weak("JNI Weak");
  55 }
  56 
  57 jobject JNIHandles::make_local(oop obj) {
  58   return make_local(Thread::current(), obj);
  59 }
  60 
  61 static int make_local_count = 0;
  62 static const int MAKE_LOCAL_MAX = 100;
   63 // Used by NewLocalRef, which requires a NULL result on out-of-memory
  64 jobject JNIHandles::make_local(Thread* thread, oop obj, AllocFailType alloc_failmode) {
  65   if (UseNewCode && alloc_failmode == AllocFailStrategy::RETURN_NULL) {
  66     int tmp = ++make_local_count;
  67     if (Verbose) tty->print_cr("COUNT: make_local = %d", tmp);
  68     if (UseNewCode2) {
  69       if (tmp >= MAKE_LOCAL_MAX) {
  70         tty->print_cr("make_local returning NULL");
  71         make_local_count = 0; // need to reset
  72         return NULL;
  73       }
  74     }
  75   }
  76   if (obj == NULL) {
  77     return NULL;                // ignore null handles
  78   } else {
  79     assert(oopDesc::is_oop(obj), "not an oop");
  80     assert(thread->is_Java_thread(), "not a Java thread");
  81     assert(!current_thread_in_native(), "must not be in native");
  82     return thread->active_handles()->allocate_handle(obj, alloc_failmode);
  83   }
  84 }
  85 
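The UseNewCode/UseNewCode2 blocks above are test hooks: they force make_local to fail after MAKE_LOCAL_MAX calls so the RETURN_NULL path can be exercised (UseNewCode and UseNewCode2 are develop flags reserved for this kind of temporary testing). The comment before make_local notes that NewLocalRef needs a NULL result on out-of-memory; a hedged caller-side sketch of that contract follows (illustrative only — the real jni_NewLocalRef entry point has additional wrapper ceremony, and the helper name below is made up):

    // Illustrative caller shape, not the actual jni.cpp code.
    jobject new_local_ref_sketch(JNIEnv* env, jobject ref) {
      Thread* thread = Thread::current();
      oop obj = JNIHandles::resolve(ref);
      jobject res = JNIHandles::make_local(thread, obj,
                                           AllocFailStrategy::RETURN_NULL);
      if (res == NULL && obj != NULL) {
        // Handle allocation itself failed; the JNI layer is expected to
        // raise OutOfMemoryError rather than crash.
      }
      return res;
    }
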
  86 static void report_handle_allocation_failure(AllocFailType alloc_failmode,
  87                                              const char* handle_kind) {
  88   if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
  89     // Fake size value, since we don't know the min allocation size here.
  90     vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
  91                           "Cannot create %s JNI handle", handle_kind);
  92   } else {
  93     assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  94   }
  95 }
  96 
  97 static int make_global_count = 0;
  98 static const int MAKE_GLOBAL_MAX = 350;
  99 
 100 jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
 101   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
 102   assert(!current_thread_in_native(), "must not be in native");
 103   if (UseNewCode) {
 104     int tmp = ++make_global_count;
 105     if (Verbose) tty->print_cr("COUNT: make_global = %d", tmp);
 106     if (UseNewCode2 && alloc_failmode == AllocFailStrategy::RETURN_NULL) {
 107       if (tmp >= MAKE_GLOBAL_MAX) {
 108         tty->print_cr("make_global returning NULL");
 109         make_global_count = 0; // need to reset
 110         return NULL;
 111       }
 112     }
 113   }
 114   jobject res = NULL;
 115   if (!obj.is_null()) {
 116     // ignore null handles
 117     assert(oopDesc::is_oop(obj()), "not an oop");
 118     oop* ptr = global_handles()->allocate();
 119     // Return NULL on allocation failure.
 120     if (ptr != NULL) {
 121       assert(*ptr == NULL, "invariant");
 122       NativeAccess<>::oop_store(ptr, obj());
 123       res = reinterpret_cast<jobject>(ptr);
 124     } else {
 125       report_handle_allocation_failure(alloc_failmode, "global");
 126     }
 127   }
 128 
 129   return res;
 130 }
 131 
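For orientation: the NativeAccess<>::oop_store above has a matching load on the resolve side. A minimal sketch, assuming a strong global jobject is the raw OopStorage slot address stored here (weak handles differ; see the tag arithmetic in make_weak_global below):

    // Sketch only; the real resolve lives in jniHandles.inline.hpp.
    static oop resolve_global_sketch(jobject handle) {
      oop* slot = reinterpret_cast<oop*>(handle);
      return NativeAccess<>::oop_load(slot);
    }
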
 132 static int make_weak_count = 0;
 133 static const int MAKE_WEAK_MAX = 90;
 134 
 135 jobject JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
 136   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
 137   assert(!current_thread_in_native(), "must not be in native");
 138   if (UseNewCode) {
 139     int tmp = ++make_weak_count;
 140     if (Verbose) tty->print_cr("COUNT: make_weak = %d", tmp);
 141     if (UseNewCode2 && alloc_failmode == AllocFailStrategy::RETURN_NULL) {
 142       if (tmp >= MAKE_WEAK_MAX) {
 143         tty->print_cr("make_weak returning NULL");
 144         make_weak_count = 0; // need to reset
 145         return NULL;
 146       }
 147     }
 148   }
 149   jobject res = NULL;
 150   if (!obj.is_null()) {
 151     // ignore null handles
 152     assert(oopDesc::is_oop(obj()), "not an oop");
 153     oop* ptr = weak_global_handles()->allocate();
 154     // Return NULL on allocation failure.
 155     if (ptr != NULL) {
 156       assert(*ptr == NULL, "invariant");
 157       NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
 158       char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
 159       res = reinterpret_cast<jobject>(tptr);
 160     } else {
 161       report_handle_allocation_failure(alloc_failmode, "weak global");
 162     }
 163   }
 164   return res;
 165 }
 166 
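make_weak_global returns the slot address plus weak_tag_value, so a jweak is distinguishable from a plain jobject by its low bit. A sketch of the decoding this implies (helper names and the mask constant are assumptions modeled on the arithmetic above, not quoted from jniHandles.hpp):

    // Assumed decoding, mirroring the '+ weak_tag_value' encoding above.
    static inline bool is_jweak_sketch(jobject handle) {
      const uintptr_t weak_tag_mask = 1;
      return (reinterpret_cast<uintptr_t>(handle) & weak_tag_mask) != 0;
    }
    static inline oop* jweak_ptr_sketch(jobject handle) {
      // Strip the tag bit to recover the OopStorage slot address.
      char* ptr = reinterpret_cast<char*>(handle) - weak_tag_value;
      return reinterpret_cast<oop*>(ptr);
    }
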
 167 // Resolve some erroneous cases to NULL, rather than treating them as
 168 // possibly unchecked errors.  In particular, deleted handles are


 366 
 367 // There is a freelist of handles running through the JNIHandleBlock
 368 // with a tagged next pointer, distinguishing these next pointers from
 369 // oops. The freelist handling currently relies on the size of oops
 370 // being the same as a native pointer. If this ever changes, then
 371 // this freelist handling must change too.
 372 STATIC_ASSERT(sizeof(oop) == sizeof(uintptr_t));
 373 
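The tagging this comment describes can be pictured with a few one-liners (a sketch assuming the tag is the low bit, which is what the STATIC_ASSERT and the is_tagged_free_list/tag_free_list/untag_free_list calls below rely on; the real definitions live in jniHandles.hpp):

    // Low-bit tagging: a slot holds either a valid oop (bit clear) or a
    // tagged freelist 'next' pointer (bit set). This is why sizeof(oop)
    // must equal sizeof(uintptr_t).
    const uintptr_t free_list_tag_sketch = 1;
    static inline bool is_tagged_free_list_sketch(uintptr_t v) {
      return (v & free_list_tag_sketch) != 0;
    }
    static inline uintptr_t tag_free_list_sketch(uintptr_t v) {
      return v | free_list_tag_sketch;
    }
    static inline uintptr_t untag_free_list_sketch(uintptr_t v) {
      return v & ~free_list_tag_sketch;
    }
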
 374 #ifdef ASSERT
 375 void JNIHandleBlock::zap() {
 376   // Zap block values
 377   _top = 0;
 378   for (int index = 0; index < block_size_in_oops; index++) {
 379     // NOT using Access here; just bare clobbering to NULL, since the
 380     // block no longer contains valid oops.
 381     _handles[index] = 0;
 382   }
 383 }
 384 #endif // ASSERT
 385 
  386 JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread, AllocFailType alloc_failmode) {
 387   assert(thread == NULL || thread == Thread::current(), "sanity check");
 388   JNIHandleBlock* block;
 389   // Check the thread-local free list for a block so we don't
 390   // have to acquire a mutex.
 391   if (thread != NULL && thread->free_handle_block() != NULL) {
 392     block = thread->free_handle_block();
 393     thread->set_free_handle_block(block->_next);
 394   }
 395   else {
 396     // locking with safepoint checking introduces a potential deadlock:
 397     // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
  398     // - another thread would hold Threads_lock (jni_AttachCurrentThread) and then
 399     //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
 400     MutexLocker ml(JNIHandleBlockFreeList_lock,
 401                    Mutex::_no_safepoint_check_flag);
 402     if (_block_free_list == NULL) {
 403       // Allocate new block
 404       if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
 405         block = new (std::nothrow) JNIHandleBlock();
 406         if (block == NULL) {
 407           return NULL;
 408         }
 409       } else {
 410         block = new JNIHandleBlock();
 411       }
 412       _blocks_allocated++;
 413       block->zap();
 414       #ifndef PRODUCT
 415       // Link new block to list of all allocated blocks
 416       block->_block_list_link = _block_list;
 417       _block_list = block;
 418       #endif
 419     } else {
 420       // Get block from free list
 421       block = _block_free_list;
 422       _block_free_list = _block_free_list->_next;
 423     }
 424   }
 425   block->_top = 0;
 426   block->_next = NULL;
 427   block->_pop_frame_link = NULL;
 428   block->_planned_capacity = block_size_in_oops;
 429   // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
 430   debug_only(block->_last = NULL);
 431   debug_only(block->_free_list = NULL);


 491       for (int index = 0; index < current->_top; index++) {
 492         uintptr_t* addr = &(current->_handles)[index];
 493         uintptr_t value = *addr;
 494         // traverse heap pointers only, not deleted handles or free list
 495         // pointers
 496         if (value != 0 && !is_tagged_free_list(value)) {
 497           oop* root = (oop*)addr;
 498           f->do_oop(root);
 499         }
 500       }
 501       // the next handle block is valid only if current block is full
 502       if (current->_top < block_size_in_oops) {
 503         break;
 504       }
 505     }
 506     current_chain = current_chain->pop_frame_link();
 507   }
 508 }
 509 
 510 
 511 jobject JNIHandleBlock::allocate_handle(oop obj, AllocFailType alloc_failmode) {
 512   assert(Universe::heap()->is_in(obj), "sanity check");
 513   if (_top == 0) {
 514     // This is the first allocation or the initial block got zapped when
 515     // entering a native function. If we have any following blocks they are
 516     // not valid anymore.
 517     for (JNIHandleBlock* current = _next; current != NULL;
 518          current = current->_next) {
 519       assert(current->_last == NULL, "only first block should have _last set");
 520       assert(current->_free_list == NULL,
 521              "only first block should have _free_list set");
 522       if (current->_top == 0) {
  523         // All blocks after the first cleared trailing block are already cleared.
 524 #ifdef ASSERT
 525         for (current = current->_next; current != NULL; current = current->_next) {
 526           assert(current->_top == 0, "trailing blocks must already be cleared");
 527         }
 528 #endif
 529         break;
 530       }
 531       current->_top = 0;


 539   }
 540 
 541   // Try last block
 542   if (_last->_top < block_size_in_oops) {
 543     oop* handle = (oop*)&(_last->_handles)[_last->_top++];
 544     NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
 545     return (jobject) handle;
 546   }
 547 
 548   // Try free list
 549   if (_free_list != NULL) {
 550     oop* handle = (oop*)_free_list;
 551     _free_list = (uintptr_t*) untag_free_list(*_free_list);
 552     NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
 553     return (jobject) handle;
 554   }
  555   // Check if an unused block follows the last one
 556   if (_last->_next != NULL) {
 557     // update last and retry
 558     _last = _last->_next;
 559     return allocate_handle(obj, alloc_failmode);
 560   }
 561 
 562   // No space available, we have to rebuild free list or expand
 563   if (_allocate_before_rebuild == 0) {
  564     rebuild_free_list();        // updates _allocate_before_rebuild counter
 565   } else {
 566     // Append new block
 567     Thread* thread = Thread::current();
 568     Handle obj_handle(thread, obj);
 569     // This can block, so we need to preserve obj across call.
 570     _last->_next = JNIHandleBlock::allocate_block(thread, alloc_failmode);
 571     if (_last->_next == NULL) {
 572       return NULL;
 573     }
 574     _last = _last->_next;
 575     _allocate_before_rebuild--;
 576     obj = obj_handle();
 577   }
 578   return allocate_handle(obj, alloc_failmode);  // retry
 579 }
 580 
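rebuild_free_list below treats a zero slot as reusable, which only works because deleting a handle clears its slot. A sketch of that delete side (an assumed shape, consistent with the "cleared out by a delete call" comment in rebuild_free_list):

    // Assumed delete-side behavior: clearing the slot makes it a
    // candidate for the freelist on the next rebuild_free_list pass.
    static void destroy_local_sketch(jobject handle) {
      if (handle != NULL) {
        *reinterpret_cast<oop*>(handle) = NULL;
      }
    }
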
 581 void JNIHandleBlock::rebuild_free_list() {
 582   assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
 583   int free = 0;
 584   int blocks = 0;
 585   for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
 586     for (int index = 0; index < current->_top; index++) {
 587       uintptr_t* handle = &(current->_handles)[index];
 588       if (*handle == 0) {
 589         // this handle was cleared out by a delete call, reuse it
 590         *handle = _free_list == NULL ? 0 : tag_free_list((uintptr_t)_free_list);
 591         _free_list = handle;
 592         free++;
 593       }
 594     }
  595     // we should not rebuild the free list if there are unused handles at the end
 596     assert(current->_top == block_size_in_oops, "just checking");
 597     blocks++;
 598   }