/*
 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/align.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
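
// JNI handles are oop* slots handed out to native code.  Local handles live
// in per-thread JNIHandleBlock chains reached through Thread::active_handles();
// global and weak global handles live in the two block chains rooted below.
// Weak global handles are additionally tagged in the low-order bit of the
// returned pointer (see make_weak_global).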

JNIHandleBlock* JNIHandles::_global_handles       = NULL;
JNIHandleBlock* JNIHandles::_weak_global_handles  = NULL;
oop             JNIHandles::_deleted_handle       = NULL;


jobject JNIHandles::make_local(oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    Thread* thread = Thread::current();
    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
    return thread->active_handles()->allocate_handle(obj);
  }
}

// Optimized versions that avoid the Thread::current() lookup when the
// caller already has the current thread or its JNIEnv.

jobject JNIHandles::make_local(Thread* thread, oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
    return thread->active_handles()->allocate_handle(obj);
  }
}


jobject JNIHandles::make_local(JNIEnv* env, oop obj) {
  if (obj == NULL) {
    return NULL;                // ignore null handles
  } else {
    JavaThread* thread = JavaThread::thread_from_jni_environment(env);
    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
    return thread->active_handles()->allocate_handle(obj);
  }
}


jobject JNIHandles::make_global(Handle obj) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    MutexLocker ml(JNIGlobalHandle_lock);
    assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
    res = _global_handles->allocate_handle(obj());
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }

  return res;
}


jobject JNIHandles::make_weak_global(Handle obj) {
  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
  jobject res = NULL;
  if (!obj.is_null()) {
    // ignore null handles
    {
      MutexLocker ml(JNIGlobalHandle_lock);
      assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
      res = _weak_global_handles->allocate_handle(obj());
    }
    // Add weak tag.
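    // The low-order bit (weak_tag_value) marks the returned pointer as a
    // weak handle so that is_jweak() and resolve() can tell it apart from
    // a strong handle; the alignment assert below guarantees the bit is
    // otherwise unused.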
    assert(is_aligned(res, weak_tag_alignment), "invariant");
    char* tptr = reinterpret_cast<char*>(res) + weak_tag_value;
    res = reinterpret_cast<jobject>(tptr);
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }
  return res;
}

template<bool external_guard>
oop JNIHandles::resolve_jweak(jweak handle) {
  assert(is_jweak(handle), "precondition");
  oop result = jweak_ref(handle);
  result = guard_value<external_guard>(result);
#if INCLUDE_ALL_GCS
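  // Resolving a jweak can make an otherwise weakly reachable object strongly
  // reachable again.  For G1, enqueue the referent in the SATB buffer so a
  // concurrent marking cycle in progress does not miss it.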
  if (result != NULL && UseG1GC) {
    G1SATBCardTableModRefBS::enqueue(result);
  }
#endif // INCLUDE_ALL_GCS
  return result;
}

template oop JNIHandles::resolve_jweak<true>(jweak);
template oop JNIHandles::resolve_jweak<false>(jweak);

bool JNIHandles::is_global_weak_cleared(jweak handle) {
  assert(is_jweak(handle), "not a weak handle");
  return guard_value<false>(jweak_ref(handle)) == NULL;
}

void JNIHandles::destroy_global(jobject handle) {
  if (handle != NULL) {
    assert(is_global_handle(handle), "Invalid delete of global JNI handle");
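    // Handles are not freed eagerly; pointing the slot at the deleted_handle()
    // sentinel lets JNIHandleBlock::rebuild_free_list() reclaim it later.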
    jobject_ref(handle) = deleted_handle();
  }
}


void JNIHandles::destroy_weak_global(jobject handle) {
  if (handle != NULL) {
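    // jweak_ref() strips the weak tag to reach the underlying oop slot.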
    jweak_ref(handle) = deleted_handle();
  }
}


void JNIHandles::oops_do(OopClosure* f) {
  f->do_oop(&_deleted_handle);
  _global_handles->oops_do(f);
}


void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
  _weak_global_handles->weak_oops_do(is_alive, f);
}


void JNIHandles::weak_oops_do(OopClosure* f) {
  AlwaysTrueClosure always_true;
  weak_oops_do(&always_true, f);
}


void JNIHandles::initialize() {
  _global_handles      = JNIHandleBlock::allocate_block();
  _weak_global_handles = JNIHandleBlock::allocate_block();
  EXCEPTION_MARK;
  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization
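  // The deleted-handle sentinel is an ordinary java.lang.Object instance;
  // freed handle slots are pointed at it so they can be told apart from
  // live handles (see destroy_global() and rebuild_free_list()).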
  Klass* k      = SystemDictionary::Object_klass();
  _deleted_handle = InstanceKlass::cast(k)->allocate_instance(CATCH);
}


bool JNIHandles::is_local_handle(Thread* thread, jobject handle) {
  JNIHandleBlock* block = thread->active_handles();

  // Look back past possible native calls to jni_PushLocalFrame.
  while (block != NULL) {
    if (block->chain_contains(handle)) {
      return true;
    }
    block = block->pop_frame_link();
  }
  return false;
}


// Determine if the handle is somewhere in the current thread's stack.
// We can't easily isolate the particular stack frame the handle might
// have come from, so we check the whole stack.

bool JNIHandles::is_frame_handle(JavaThread* thr, jobject obj) {
  // If there is no java frame, then this must be top level code, such
  // as the java command executable, in which case, this type of handle
  // is not permitted.
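  // The check is conservative: any address between the last Java sp and the
  // stack base passes, whether or not it is really a handle.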
  return (thr->has_last_Java_frame() &&
         (void*)obj < (void*)thr->stack_base() &&
         (void*)obj >= (void*)thr->last_Java_sp());
}


bool JNIHandles::is_global_handle(jobject handle) {
  return _global_handles->chain_contains(handle);
}


bool JNIHandles::is_weak_global_handle(jobject handle) {
  return _weak_global_handles->chain_contains(handle);
}

long JNIHandles::global_handle_memory_usage() {
  return _global_handles->memory_usage();
}

long JNIHandles::weak_global_handle_memory_usage() {
  return _weak_global_handles->memory_usage();
}


class CountHandleClosure: public OopClosure {
private:
  int _count;
public:
  CountHandleClosure(): _count(0) {}
  virtual void do_oop(oop* ooph) {
    if (*ooph != JNIHandles::deleted_handle()) {
      _count++;
    }
  }
  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
  int count() { return _count; }
};

// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  assert(_global_handles != NULL && _weak_global_handles != NULL,
         "JNIHandles not initialized");

  CountHandleClosure global_handle_count;
  oops_do(&global_handle_count);
  weak_oops_do(&global_handle_count);

  st->print_cr("JNI global references: %d", global_handle_count.count());
  st->cr();
  st->flush();
}

class VerifyHandleClosure: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    (*root)->verify();
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};

void JNIHandles::verify() {
  VerifyHandleClosure verify_handle;

  oops_do(&verify_handle);
  weak_oops_do(&verify_handle);
}



void jni_handles_init() {
  JNIHandles::initialize();
}


int             JNIHandleBlock::_blocks_allocated     = 0;
JNIHandleBlock* JNIHandleBlock::_block_free_list      = NULL;
#ifndef PRODUCT
JNIHandleBlock* JNIHandleBlock::_block_list           = NULL;
#endif


#ifdef ASSERT
void JNIHandleBlock::zap() {
  // Zap block values
  _top = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    _handles[index] = NULL;
  }
}
#endif // ASSERT

JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread)  {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != NULL && thread->free_handle_block() != NULL) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  }
  else {
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLockerEx ml(JNIHandleBlockFreeList_lock,
                     Mutex::_no_safepoint_check_flag);
    if (_block_free_list == NULL) {
      // Allocate new block
      block = new JNIHandleBlock();
      _blocks_allocated++;
      block->zap();
      #ifndef PRODUCT
      // Link new block to list of all allocated blocks
      block->_block_list_link = _block_list;
      _block_list = block;
      #endif
    } else {
      // Get block from free list
      block = _block_free_list;
      _block_free_list = _block_free_list->_next;
    }
  }
  block->_top = 0;
  block->_next = NULL;
  block->_pop_frame_link = NULL;
  block->_planned_capacity = block_size_in_oops;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = NULL);
  debug_only(block->_free_list = NULL);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}


void JNIHandleBlock::release_block(JNIHandleBlock* block, Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put returned block at the beginning of the thread-local free list.
  // Note that if thread == NULL, we use it as an implicit argument that
  // we _don't_ want the block to be kept on the thread's free_handle_block list.
  // See for instance JavaThread::exit().
  if (thread != NULL ) {
    block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = NULL;
    thread->set_free_handle_block(block);

    // Add original freelist to end of chain
    if ( freelist != NULL ) {
      while ( block->_next != NULL ) block = block->_next;
      block->_next = freelist;
    }
    block = NULL;
  }
  if (block != NULL) {
    // Return blocks to free list
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLockerEx ml(JNIHandleBlockFreeList_lock,
                     Mutex::_no_safepoint_check_flag);
    while (block != NULL) {
      block->zap();
      JNIHandleBlock* next = block->_next;
      block->_next = _block_free_list;
      _block_free_list = block;
      block = next;
    }
  }
  if (pop_frame_link != NULL) {
    // As a sanity check we release blocks pointed to by the pop_frame_link.
    // This should never happen (only if PopLocalFrame is not called the
    // correct number of times).
    release_block(pop_frame_link, thread);
  }
}


void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != NULL) {
    for (JNIHandleBlock* current = current_chain; current != NULL;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == NULL,
        "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        oop* root = &(current->_handles)[index];
        oop value = *root;
        // traverse heap pointers only, not deleted handles or free list
        // pointers
        if (value != NULL && Universe::heap()->is_in_reserved(value)) {
          f->do_oop(root);
        }
      }
      // the next handle block is valid only if current block is full
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}


void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
                                  OopClosure* f) {
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    assert(current->pop_frame_link() == NULL,
      "blocks holding weak global JNI handles should not have pop frame link set");
    for (int index = 0; index < current->_top; index++) {
      oop* root = &(current->_handles)[index];
      oop value = *root;
      // traverse heap pointers only, not deleted handles or free list pointers
      if (value != NULL && Universe::heap()->is_in_reserved(value)) {
        if (is_alive->do_object_b(value)) {
          // The weakly referenced object is alive, update pointer
          f->do_oop(root);
        } else {
          // The weakly referenced object is not alive, clear the reference by storing NULL
          log_develop_trace(gc, ref)("Clearing JNI weak reference (" INTPTR_FORMAT ")", p2i(root));
          *root = NULL;
        }
      }
    }
    // the next handle block is valid only if current block is full
    if (current->_top < block_size_in_oops) {
      break;
    }
  }
}


jobject JNIHandleBlock::allocate_handle(oop obj) {
  assert(Universe::heap()->is_in_reserved(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation or the initial block got zapped when
    // entering a native function. If we have any following blocks they are
    // not valid anymore.
    for (JNIHandleBlock* current = _next; current != NULL;
         current = current->_next) {
      assert(current->_last == NULL, "only first block should have _last set");
      assert(current->_free_list == NULL,
             "only first block should have _free_list set");
      if (current->_top == 0) {
        // All blocks after the first clear trailing block are already cleared.
#ifdef ASSERT
        for (current = current->_next; current != NULL; current = current->_next) {
          assert(current->_top == 0, "trailing blocks must already be cleared");
        }
#endif
        break;
      }
      current->_top = 0;
      current->zap();
    }
    // Clear initial block
    _free_list = NULL;
    _allocate_before_rebuild = 0;
    _last = this;
    zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = &(_last->_handles)[_last->_top++];
    *handle = obj;
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != NULL) {
    oop* handle = _free_list;
    _free_list = (oop*) *_free_list;
    *handle = obj;
    return (jobject) handle;
  }
  // Check if an unused block follows the last block
  if (_last->_next != NULL) {
    // update last and retry
    _last = _last->_next;
    return allocate_handle(obj);
  }

  // No space available, we have to rebuild free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();        // updates _allocate_before_rebuild counter
  } else {
    // Append new block
    Thread* thread = Thread::current();
    Handle obj_handle(thread, obj);
    // This can block, so we need to preserve obj across call.
    _last->_next = JNIHandleBlock::allocate_block(thread);
    _last = _last->_next;
    _allocate_before_rebuild--;
    obj = obj_handle();
  }
  return allocate_handle(obj);  // retry
}

void JNIHandleBlock::release_handle(jobject h) {
  if (h != NULL) {
    assert(chain_contains(h), "does not contain the JNI handle");
    // Mark the handle as deleted, allocate will reuse it
    *((oop*)h) = JNIHandles::deleted_handle();
  }
}


void JNIHandleBlock::rebuild_free_list() {
  assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
  int free = 0;
  int blocks = 0;
  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
    for (int index = 0; index < current->_top; index++) {
      oop* handle = &(current->_handles)[index];
      if (*handle ==  JNIHandles::deleted_handle()) {
        // this handle was cleared out by a delete call, reuse it
        *handle = (oop) _free_list;
        _free_list = handle;
        free++;
      }
    }
    // we should not rebuild free list if there are unused handles at the end
    assert(current->_top == block_size_in_oops, "just checking");
    blocks++;
  }
  // Heuristic: if more than half of the handles are free we rebuild next time
  // as well, otherwise we append a corresponding number of new blocks before
  // attempting a free list rebuild again.
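  // Example: with four full blocks and only a quarter of the handles free,
  // extra = total - 2*free = total/2, so two more blocks are appended before
  // the next rebuild is attempted.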
  int total = blocks * block_size_in_oops;
  int extra = total - 2*free;
  if (extra > 0) {
    // Not as many free handles as we would like - compute number of new blocks to append
    _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
  }
}


bool JNIHandleBlock::contains(jobject handle) const {
  return ((jobject)&_handles[0] <= handle && handle < (jobject)&_handles[_top]);
}


bool JNIHandleBlock::chain_contains(jobject handle) const {
  for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != NULL; current = current->_next) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}


int JNIHandleBlock::length() const {
  int result = 1;
  for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) {
    result++;
  }
  return result;
}

const size_t JNIHandleBlock::get_number_of_live_handles() {
  CountHandleClosure counter;
  oops_do(&counter);
  return counter.count();
}

// This method is not thread-safe, i.e., must be called while holding a lock on the
// structure.
long JNIHandleBlock::memory_usage() const {
  return length() * sizeof(JNIHandleBlock);
}


#ifndef PRODUCT

bool JNIHandleBlock::any_contains(jobject handle) {
  for (JNIHandleBlock* current = _block_list; current != NULL; current = current->_block_list_link) {
    if (current->contains(handle)) {
      return true;
    }
  }
  return false;
}

void JNIHandleBlock::print_statistics() {
  int used_blocks = 0;
  int free_blocks = 0;
  int used_handles = 0;
  int free_handles = 0;
  JNIHandleBlock* block = _block_list;
  while (block != NULL) {
    if (block->_top > 0) {
      used_blocks++;
    } else {
      free_blocks++;
    }
    used_handles += block->_top;
    free_handles += (block_size_in_oops - block->_top);
    block = block->_block_list_link;
  }
  tty->print_cr("JNIHandleBlocks statistics");
  tty->print_cr("- blocks allocated: %d", used_blocks + free_blocks);
  tty->print_cr("- blocks in use:    %d", used_blocks);
  tty->print_cr("- blocks free:      %d", free_blocks);
  tty->print_cr("- handles in use:   %d", used_handles);
  tty->print_cr("- handles free:     %d", free_handles);
}

#endif