
src/hotspot/share/runtime/jniHandles.cpp

rev 50534 : [mq]: rename_RootAccess


  92   if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
  93     // Fake size value, since we don't know the min allocation size here.
  94     vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
  95                           "Cannot create %s JNI handle", handle_kind);
  96   } else {
  97     assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  98   }
  99 }
 100 
 101 jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
 102   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
 103   assert(!current_thread_in_native(), "must not be in native");
 104   jobject res = NULL;
 105   if (!obj.is_null()) {
 106     // ignore null handles
 107     assert(oopDesc::is_oop(obj()), "not an oop");
 108     oop* ptr = global_handles()->allocate();
 109     // Return NULL on allocation failure.
 110     if (ptr != NULL) {
 111       assert(*ptr == NULL, "invariant");
 112       RootAccess<IN_CONCURRENT_ROOT>::oop_store(ptr, obj());
 113       res = reinterpret_cast<jobject>(ptr);
 114     } else {
 115       report_handle_allocation_failure(alloc_failmode, "global");
 116     }
 117   } else {
 118     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
 119   }
 120 
 121   return res;
 122 }
 123 
 124 
 125 jobject JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
 126   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
 127   assert(!current_thread_in_native(), "must not be in native");
 128   jobject res = NULL;
 129   if (!obj.is_null()) {
 130     // ignore null handles
 131     assert(oopDesc::is_oop(obj()), "not an oop");
 132     oop* ptr = weak_global_handles()->allocate();
 133     // Return NULL on allocation failure.
 134     if (ptr != NULL) {
 135       assert(*ptr == NULL, "invariant");
 136       RootAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
 137       char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
 138       res = reinterpret_cast<jobject>(tptr);
 139     } else {
 140       report_handle_allocation_failure(alloc_failmode, "weak global");
 141     }
 142   } else {
 143     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
 144   }
 145   return res;
 146 }
 147 
 148 // Resolve some erroneous cases to NULL, rather than treating them as
 149 // possibly unchecked errors.  In particular, deleted handles are
 150 // treated as NULL (though a deleted and later reallocated handle
 151 // isn't detected).
 152 oop JNIHandles::resolve_external_guard(jobject handle) {
 153   oop result = NULL;
 154   if (handle != NULL) {
 155     result = resolve_impl<true /* external_guard */ >(handle);
 156   }
 157   return result;
 158 }
 159 
 160 oop JNIHandles::resolve_jweak(jweak handle) {
 161   assert(handle != NULL, "precondition");
 162   assert(is_jweak(handle), "precondition");
 163   return RootAccess<ON_PHANTOM_OOP_REF>::oop_load(jweak_ptr(handle));
 164 }
 165 
 166 bool JNIHandles::is_global_weak_cleared(jweak handle) {
 167   assert(handle != NULL, "precondition");
 168   assert(is_jweak(handle), "not a weak handle");
 169   oop* oop_ptr = jweak_ptr(handle);
 170   oop value = RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
 171   return value == NULL;
 172 }
 173 
 174 void JNIHandles::destroy_global(jobject handle) {
 175   if (handle != NULL) {
 176     assert(!is_jweak(handle), "wrong method for destroying jweak");
 177     oop* oop_ptr = jobject_ptr(handle);
 178     RootAccess<IN_CONCURRENT_ROOT>::oop_store(oop_ptr, (oop)NULL);
 179     global_handles()->release(oop_ptr);
 180   }
 181 }
 182 
 183 
 184 void JNIHandles::destroy_weak_global(jobject handle) {
 185   if (handle != NULL) {
 186     assert(is_jweak(handle), "JNI handle not jweak");
 187     oop* oop_ptr = jweak_ptr(handle);
 188     RootAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)NULL);
 189     weak_global_handles()->release(oop_ptr);
 190   }
 191 }
 192 
 193 
 194 void JNIHandles::oops_do(OopClosure* f) {
 195   global_handles()->oops_do(f);
 196 }
 197 
 198 
 199 void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
 200   weak_global_handles()->weak_oops_do(is_alive, f);
 201 }
 202 
 203 
 204 void JNIHandles::weak_oops_do(OopClosure* f) {
 205   weak_global_handles()->weak_oops_do(f);
 206 }
 207 
 208 


 500 #ifdef ASSERT
 501         for (current = current->_next; current != NULL; current = current->_next) {
 502           assert(current->_top == 0, "trailing blocks must already be cleared");
 503         }
 504 #endif
 505         break;
 506       }
 507       current->_top = 0;
 508       current->zap();
 509     }
 510     // Clear initial block
 511     _free_list = NULL;
 512     _allocate_before_rebuild = 0;
 513     _last = this;
 514     zap();
 515   }
 516 
 517   // Try last block
 518   if (_last->_top < block_size_in_oops) {
 519     oop* handle = &(_last->_handles)[_last->_top++];
 520     RootAccess<AS_DEST_NOT_INITIALIZED>::oop_store(handle, obj);
 521     return (jobject) handle;
 522   }
 523 
 524   // Try free list
 525   if (_free_list != NULL) {
 526     oop* handle = _free_list;
 527     _free_list = (oop*) *_free_list;
 528     RootAccess<AS_DEST_NOT_INITIALIZED>::oop_store(handle, obj);
 529     return (jobject) handle;
 530   }
 531   // Check if an unused block follows the last one
 532   if (_last->_next != NULL) {
 533     // update last and retry
 534     _last = _last->_next;
 535     return allocate_handle(obj);
 536   }
 537 
 538   // No space available, we have to rebuild free list or expand
 539   if (_allocate_before_rebuild == 0) {
 540     rebuild_free_list();        // updates _allocate_before_rebuild counter
 541   } else {
 542     // Append new block
 543     Thread* thread = Thread::current();
 544     Handle obj_handle(thread, obj);
 545     // This can block, so we need to preserve obj across call.
 546     _last->_next = JNIHandleBlock::allocate_block(thread);
 547     _last = _last->_next;
 548     _allocate_before_rebuild--;




  92   if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
  93     // Fake size value, since we don't know the min allocation size here.
  94     vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
  95                           "Cannot create %s JNI handle", handle_kind);
  96   } else {
  97     assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
  98   }
  99 }
 100 
 101 jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
 102   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
 103   assert(!current_thread_in_native(), "must not be in native");
 104   jobject res = NULL;
 105   if (!obj.is_null()) {
 106     // ignore null handles
 107     assert(oopDesc::is_oop(obj()), "not an oop");
 108     oop* ptr = global_handles()->allocate();
 109     // Return NULL on allocation failure.
 110     if (ptr != NULL) {
 111       assert(*ptr == NULL, "invariant");
 112       NativeAccess<IN_CONCURRENT_ROOT>::oop_store(ptr, obj());
 113       res = reinterpret_cast<jobject>(ptr);
 114     } else {
 115       report_handle_allocation_failure(alloc_failmode, "global");
 116     }
 117   } else {
 118     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
 119   }
 120 
 121   return res;
 122 }
 123 
 124 
 125 jobject JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
 126   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
 127   assert(!current_thread_in_native(), "must not be in native");
 128   jobject res = NULL;
 129   if (!obj.is_null()) {
 130     // ignore null handles
 131     assert(oopDesc::is_oop(obj()), "not an oop");
 132     oop* ptr = weak_global_handles()->allocate();
 133     // Return NULL on allocation failure.
 134     if (ptr != NULL) {
 135       assert(*ptr == NULL, "invariant");
 136       NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
 137       char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
 138       res = reinterpret_cast<jobject>(tptr);
 139     } else {
 140       report_handle_allocation_failure(alloc_failmode, "weak global");
 141     }
 142   } else {
 143     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
 144   }
 145   return res;
 146 }
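make_weak_global tags the returned pointer with weak_tag_value so that is_jweak and jweak_ptr (defined in jniHandles.hpp, not shown here) can distinguish weak handles from strong ones and recover the untagged oop slot. A minimal standalone sketch of that tagging scheme, assuming weak_tag_value is 1 and using simplified stand-in types rather than the real HotSpot ones:

#include <cassert>
#include <cstdint>

typedef void*    jobject_t;                  // stand-in for jobject
typedef intptr_t oop_slot_t;                 // stand-in for the oop held in a handle slot

static const uintptr_t weak_tag_value = 1;   // assumed: low bit marks a jweak

static jobject_t tag_weak(oop_slot_t* slot) {
  // Mirrors make_weak_global: the slot address plus the tag bit becomes the jweak.
  return reinterpret_cast<jobject_t>(reinterpret_cast<char*>(slot) + weak_tag_value);
}

static bool is_jweak(jobject_t handle) {
  return (reinterpret_cast<uintptr_t>(handle) & weak_tag_value) != 0;
}

static oop_slot_t* jweak_ptr(jobject_t handle) {
  assert(is_jweak(handle));
  // Strip the tag to get back at the slot, as resolve_jweak and destroy_weak_global do.
  return reinterpret_cast<oop_slot_t*>(reinterpret_cast<char*>(handle) - weak_tag_value);
}

Because handle slots are pointer-aligned, the low bit is always free to carry the tag.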
 147 
 148 // Resolve some erroneous cases to NULL, rather than treating them as
 149 // possibly unchecked errors.  In particular, deleted handles are
 150 // treated as NULL (though a deleted and later reallocated handle
 151 // isn't detected).
 152 oop JNIHandles::resolve_external_guard(jobject handle) {
 153   oop result = NULL;
 154   if (handle != NULL) {
 155     result = resolve_impl<true /* external_guard */ >(handle);
 156   }
 157   return result;
 158 }
 159 
 160 oop JNIHandles::resolve_jweak(jweak handle) {
 161   assert(handle != NULL, "precondition");
 162   assert(is_jweak(handle), "precondition");
 163   return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(jweak_ptr(handle));
 164 }
 165 
 166 bool JNIHandles::is_global_weak_cleared(jweak handle) {
 167   assert(handle != NULL, "precondition");
 168   assert(is_jweak(handle), "not a weak handle");
 169   oop* oop_ptr = jweak_ptr(handle);
 170   oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
 171   return value == NULL;
 172 }
 173 
 174 void JNIHandles::destroy_global(jobject handle) {
 175   if (handle != NULL) {
 176     assert(!is_jweak(handle), "wrong method for destroying jweak");
 177     oop* oop_ptr = jobject_ptr(handle);
 178     NativeAccess<IN_CONCURRENT_ROOT>::oop_store(oop_ptr, (oop)NULL);
 179     global_handles()->release(oop_ptr);
 180   }
 181 }
 182 
 183 
 184 void JNIHandles::destroy_weak_global(jobject handle) {
 185   if (handle != NULL) {
 186     assert(is_jweak(handle), "JNI handle not jweak");
 187     oop* oop_ptr = jweak_ptr(handle);
 188     NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)NULL);
 189     weak_global_handles()->release(oop_ptr);
 190   }
 191 }
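These make_*/destroy_* paths back the corresponding JNI reference functions (NewGlobalRef, NewWeakGlobalRef, DeleteGlobalRef, DeleteWeakGlobalRef). A hedged sketch of the native caller's view, using only the standard JNI API:

#include <jni.h>

static jobject cache_global(JNIEnv* env, jobject local) {
  // Promotes a local reference to a global one; on the VM side this ends up
  // in JNIHandles::make_global above.
  return env->NewGlobalRef(local);
}

static void drop_global(JNIEnv* env, jobject global) {
  // Clears the stored oop and releases the slot (JNIHandles::destroy_global).
  env->DeleteGlobalRef(global);
}

static jboolean weak_still_alive(JNIEnv* env, jweak weak) {
  // A cleared weak global compares equal to NULL, the same condition
  // JNIHandles::is_global_weak_cleared tests on the VM side.
  return env->IsSameObject(weak, NULL) ? JNI_FALSE : JNI_TRUE;
}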
 192 
 193 
 194 void JNIHandles::oops_do(OopClosure* f) {
 195   global_handles()->oops_do(f);
 196 }
 197 
 198 
 199 void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
 200   weak_global_handles()->weak_oops_do(is_alive, f);
 201 }
 202 
 203 
 204 void JNIHandles::weak_oops_do(OopClosure* f) {
 205   weak_global_handles()->weak_oops_do(f);
 206 }
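oops_do and weak_oops_do simply delegate to the underlying handle storage, applying the caller's closure to every live slot. A hedged sketch of a trivial client closure, assuming the usual OopClosure interface (virtual do_oop overloads for oop* and narrowOop*) from iterator.hpp:

// Counts non-NULL global JNI handles; illustrative only.
class CountJNIHandlesClosure : public OopClosure {
  size_t _count;
 public:
  CountJNIHandlesClosure() : _count(0) {}
  virtual void do_oop(oop* p)       { if (*p != NULL) _count++; }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } // handle slots hold full oops
  size_t count() const { return _count; }
};

// Usage:
//   CountJNIHandlesClosure cl;
//   JNIHandles::oops_do(&cl);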
 207 
 208 


 500 #ifdef ASSERT
 501         for (current = current->_next; current != NULL; current = current->_next) {
 502           assert(current->_top == 0, "trailing blocks must already be cleared");
 503         }
 504 #endif
 505         break;
 506       }
 507       current->_top = 0;
 508       current->zap();
 509     }
 510     // Clear initial block
 511     _free_list = NULL;
 512     _allocate_before_rebuild = 0;
 513     _last = this;
 514     zap();
 515   }
 516 
 517   // Try last block
 518   if (_last->_top < block_size_in_oops) {
 519     oop* handle = &(_last->_handles)[_last->_top++];
 520     NativeAccess<AS_DEST_NOT_INITIALIZED>::oop_store(handle, obj);
 521     return (jobject) handle;
 522   }
 523 
 524   // Try free list
 525   if (_free_list != NULL) {
 526     oop* handle = _free_list;
 527     _free_list = (oop*) *_free_list;
 528     NativeAccess<AS_DEST_NOT_INITIALIZED>::oop_store(handle, obj);
 529     return (jobject) handle;
 530   }
 531   // Check if an unused block follows the last one
 532   if (_last->_next != NULL) {
 533     // update last and retry
 534     _last = _last->_next;
 535     return allocate_handle(obj);
 536   }
 537 
 538   // No space available, we have to rebuild free list or expand
 539   if (_allocate_before_rebuild == 0) {
 540     rebuild_free_list();        // updates _allocate_before_rebuild counter
 541   } else {
 542     // Append new block
 543     Thread* thread = Thread::current();
 544     Handle obj_handle(thread, obj);
 545     // This can block, so we need to preserve obj across call.
 546     _last->_next = JNIHandleBlock::allocate_block(thread);
 547     _last = _last->_next;
 548     _allocate_before_rebuild--;
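allocate_handle (shown only in part above) tries its sources in a fixed order: bump-allocate in the last block, pop the free list, advance into an already-linked trailing block, and only then rebuild the free list or append a new block. A simplified, self-contained sketch of that order, with stand-in types rather than the real JNIHandleBlock:

#include <cstddef>

struct MiniBlock {
  static const int kSlots = 32;        // stands in for block_size_in_oops
  void*      _handles[kSlots];         // handle slots
  int        _top;                     // next unused slot in this block
  MiniBlock* _next;                    // chained blocks
};

static void* allocate_slot(MiniBlock*& last, void**& free_list) {
  if (last->_top < MiniBlock::kSlots) {          // 1. try the last block
    return &last->_handles[last->_top++];
  }
  if (free_list != NULL) {                       // 2. try the free list
    void** slot = free_list;
    free_list = reinterpret_cast<void**>(*free_list);
    return slot;
  }
  if (last->_next != NULL) {                     // 3. an unused block already follows; retry there
    last = last->_next;
    return allocate_slot(last, free_list);
  }
  return NULL;                                   // 4. caller rebuilds the free list or appends a block
}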

