1 /* 2 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/classLoaderDataGraph.hpp" 27 #include "classfile/moduleEntry.hpp" 28 #include "classfile/systemDictionary.hpp" 29 #include "jvmtifiles/jvmtiEnv.hpp" 30 #include "memory/iterator.hpp" 31 #include "memory/resourceArea.hpp" 32 #include "oops/objArrayKlass.hpp" 33 #include "oops/objArrayOop.hpp" 34 #include "oops/oop.inline.hpp" 35 #include "oops/oopHandle.inline.hpp" 36 #include "prims/jvmtiEnvBase.hpp" 37 #include "prims/jvmtiEventController.inline.hpp" 38 #include "prims/jvmtiExtensions.hpp" 39 #include "prims/jvmtiImpl.hpp" 40 #include "prims/jvmtiManageCapabilities.hpp" 41 #include "prims/jvmtiTagMap.hpp" 42 #include "prims/jvmtiThreadState.inline.hpp" 43 #include "runtime/biasedLocking.hpp" 44 #include "runtime/deoptimization.hpp" 45 #include "runtime/frame.inline.hpp" 46 #include "runtime/handles.inline.hpp" 47 #include "runtime/interfaceSupport.inline.hpp" 48 #include "runtime/jfieldIDWorkaround.hpp" 49 #include "runtime/jniHandles.inline.hpp" 50 #include "runtime/objectMonitor.hpp" 51 #include "runtime/objectMonitor.inline.hpp" 52 #include "runtime/signature.hpp" 53 #include "runtime/thread.inline.hpp" 54 #include "runtime/threadSMR.hpp" 55 #include "runtime/vframe.hpp" 56 #include "runtime/vframe_hp.hpp" 57 #include "runtime/vmThread.hpp" 58 #include "runtime/vmOperations.hpp" 59 60 /////////////////////////////////////////////////////////////// 61 // 62 // JvmtiEnvBase 63 // 64 65 JvmtiEnvBase* JvmtiEnvBase::_head_environment = NULL; 66 67 bool JvmtiEnvBase::_globally_initialized = false; 68 volatile bool JvmtiEnvBase::_needs_clean_up = false; 69 70 jvmtiPhase JvmtiEnvBase::_phase = JVMTI_PHASE_PRIMORDIAL; 71 72 volatile int JvmtiEnvBase::_dying_thread_env_iteration_count = 0; 73 74 extern jvmtiInterface_1_ jvmti_Interface; 75 extern jvmtiInterface_1_ jvmtiTrace_Interface; 76 77 78 // perform initializations that must occur before any JVMTI environments 79 // are released but which should 
// only be initialized once (no matter
// how many environments are created).
void
JvmtiEnvBase::globally_initialize() {
  // Single-threaded by construction: either no Java threads exist yet, or the
  // caller holds JvmtiThreadState_lock.
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
  assert(_globally_initialized == false, "bad call");

  JvmtiManageCapabilities::initialize();

  // register extension functions and events
  JvmtiExtensions::register_extensions();

#ifdef JVMTI_TRACE
  JvmtiTrace::initialize();
#endif

  _globally_initialized = true;
}


// Per-environment initialization: append this environment to the global
// environment list and run the one-time global initialization on first use.
void
JvmtiEnvBase::initialize() {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");

  // Add this environment to the end of the environment list (order is important)
  {
    // This block of code must not contain any safepoints, as list deallocation
    // (which occurs at a safepoint) cannot occur simultaneously with this list
    // addition.  Note: NoSafepointVerifier cannot, currently, be used before
    // threads exist.
    JvmtiEnvIterator it;
    JvmtiEnvBase *previous_env = NULL;
    // Walk to the current tail of the singly-linked environment list.
    for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
      previous_env = env;
    }
    if (previous_env == NULL) {
      _head_environment = this;
    } else {
      previous_env->set_next_environment(this);
    }
  }

  if (_globally_initialized == false) {
    globally_initialize();
  }
}

// Report the JVMTI phase as seen by this environment. Environments that
// requested can_generate_early_vmstart observe START while the VM is still
// in the primordial phase.
jvmtiPhase
JvmtiEnvBase::phase() {
  // For the JVMTI environments possessed the can_generate_early_vmstart:
  // replace JVMTI_PHASE_PRIMORDIAL with JVMTI_PHASE_START
  if (_phase == JVMTI_PHASE_PRIMORDIAL &&
      JvmtiExport::early_vmstart_recorded() &&
      early_vmstart_env()) {
    return JVMTI_PHASE_START;
  }
  return _phase; // Normal case
}

// Return true if this looks like a live (not disposed) environment, judged
// by the _magic field. Safe to call on a pointer that may not actually be
// a JvmtiEnvBase.
bool
JvmtiEnvBase::is_valid() {
  jint value = 0;

  // This object might not be a JvmtiEnvBase so we can't assume
  // the _magic field is properly aligned.
// Get the value in a safe
// way and then check against JVMTI_MAGIC.

  // Byte-wise reads sized to the field avoid any alignment assumptions.
  switch (sizeof(_magic)) {
  case 2:
    value = Bytes::get_native_u2((address)&_magic);
    break;

  case 4:
    value = Bytes::get_native_u4((address)&_magic);
    break;

  case 8:
    value = Bytes::get_native_u8((address)&_magic);
    break;

  default:
    guarantee(false, "_magic field is an unexpected size");
  }

  return value == JVMTI_MAGIC;
}


// The following three helpers decode the JVMTI version the agent requested
// at GetEnv time. Only major.minor participate in the comparison.
bool
JvmtiEnvBase::use_version_1_0_semantics() {
  int major, minor, micro;

  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
  return major == 1 && minor == 0;  // micro version doesn't matter here
}


bool
JvmtiEnvBase::use_version_1_1_semantics() {
  int major, minor, micro;

  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
  return major == 1 && minor == 1;  // micro version doesn't matter here
}

bool
JvmtiEnvBase::use_version_1_2_semantics() {
  int major, minor, micro;

  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
  return major == 1 && minor == 2;  // micro version doesn't matter here
}


// Construct a new environment: clear all state, then register with the
// event controller and install the (optionally tracing) function table.
JvmtiEnvBase::JvmtiEnvBase(jint version) : _env_event_enable() {
  _version = version;
  _env_local_storage = NULL;
  _tag_map = NULL;
  _native_method_prefix_count = 0;
  _native_method_prefixes = NULL;
  _next = NULL;
  _class_file_load_hook_ever_enabled = false;

  // Moot since ClassFileLoadHook not yet enabled.
  // But "true" will give a more predictable ClassFileLoadHook behavior
  // for environment creation during ClassFileLoadHook.
  _is_retransformable = true;

  // all callbacks initially NULL
  memset(&_event_callbacks,0,sizeof(jvmtiEventCallbacks));

  // all capabilities initially off
  memset(&_current_capabilities, 0, sizeof(_current_capabilities));

  // all prohibited capabilities initially off
  memset(&_prohibited_capabilities, 0, sizeof(_prohibited_capabilities));

  // Mark the environment live only after the state above is consistent.
  _magic = JVMTI_MAGIC;

  JvmtiEventController::env_initialize((JvmtiEnv*)this);

#ifdef JVMTI_TRACE
  // Route calls through the tracing interface when TraceJVMTI is set.
  _jvmti_external.functions = TraceJVMTI != NULL ? &jvmtiTrace_Interface : &jvmti_Interface;
#else
  _jvmti_external.functions = &jvmti_Interface;
#endif
}


// First stage of DisposeEnvironment: disable events, then let the event
// controller call back into env_dispose() under JvmtiThreadState_lock.
void
JvmtiEnvBase::dispose() {

#ifdef JVMTI_TRACE
  JvmtiTrace::shutdown();
#endif

  // Dispose of event info and let the event controller call us back
  // in a locked state (env_dispose, below)
  JvmtiEventController::env_dispose(this);
}

// Second stage of disposal, entered via the event controller with events
// already disabled and JvmtiThreadState_lock held (or pre-threads).
void
JvmtiEnvBase::env_dispose() {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");

  // We have been entered with all events disabled on this environment.
  // A race to re-enable events (by setting callbacks) is prevented by
  // checking for a valid environment when setting callbacks (while
  // holding the JvmtiThreadState_lock).

  // Mark as invalid.
  _magic = DISPOSED_MAGIC;

  // Relinquish all capabilities.
  jvmtiCapabilities *caps = get_capabilities();
  JvmtiManageCapabilities::relinquish_capabilities(caps, caps, caps);

  // Same situation as with events (see above)
  set_native_method_prefixes(0, NULL);

  JvmtiTagMap* tag_map_to_deallocate = _tag_map;
  set_tag_map(NULL);
  // A tag map can be big, deallocate it now
  if (tag_map_to_deallocate != NULL) {
    delete tag_map_to_deallocate;
  }

  // Actual unlinking/deletion is deferred to periodic_clean_up() at a
  // safepoint; just flag that there is work to do.
  _needs_clean_up = true;
}


// Final destruction, only ever performed at a safepoint by
// periodic_clean_up() after the environment was disposed.
JvmtiEnvBase::~JvmtiEnvBase() {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");

  // There is a small window of time during which the tag map of a
  // disposed environment could have been reallocated.
  // Make sure it is gone.
  JvmtiTagMap* tag_map_to_deallocate = _tag_map;
  set_tag_map(NULL);
  // A tag map can be big, deallocate it now
  if (tag_map_to_deallocate != NULL) {
    delete tag_map_to_deallocate;
  }

  _magic = BAD_MAGIC;
}


// Safepoint operation: reclaim disposed environments and related
// per-thread JVMTI state.
void
JvmtiEnvBase::periodic_clean_up() {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");

  // JvmtiEnvBase reference is saved in JvmtiEnvThreadState. So
  // clean up JvmtiThreadState before deleting JvmtiEnv pointer.
  JvmtiThreadState::periodic_clean_up();

  // Unlink all invalid environments from the list of environments
  // and deallocate them
  JvmtiEnvIterator it;
  JvmtiEnvBase* previous_env = NULL;
  JvmtiEnvBase* env = it.first();
  while (env != NULL) {
    if (env->is_valid()) {
      previous_env = env;
      env = it.next(env);
    } else {
      // This one isn't valid, remove it from the list and deallocate it
      JvmtiEnvBase* defunct_env = env;
      env = it.next(env);
      if (previous_env == NULL) {
        _head_environment = env;
      } else {
        previous_env->set_next_environment(env);
      }
      delete defunct_env;
    }
  }

}


// Called at a safepoint: run periodic_clean_up() only when no thread is in
// the middle of iterating the environment list (deleting an environment out
// from under an iterator would be unsafe).
void
JvmtiEnvBase::check_for_periodic_clean_up() {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");

  // Accumulates whether any live thread is inside a JvmtiEnvIterator.
  class ThreadInsideIterationClosure: public ThreadClosure {
   private:
    bool _inside;
   public:
    ThreadInsideIterationClosure() : _inside(false) {};

    void do_thread(Thread* thread) {
      _inside |= thread->is_inside_jvmti_env_iteration();
    }

    bool is_inside_jvmti_env_iteration() {
      return _inside;
    }
  };

  if (_needs_clean_up) {
    // Check if we are currently iterating environment,
    // deallocation should not occur if we are
    ThreadInsideIterationClosure tiic;
    Threads::threads_do(&tiic);
    if (!tiic.is_inside_jvmti_env_iteration() &&
        !is_inside_dying_thread_env_iteration()) {
      _needs_clean_up = false;
      JvmtiEnvBase::periodic_clean_up();
    }
  }
}


// Latches the first enabling of ClassFileLoadHook for this environment and
// fixes whether loaded classes remain retransformable. Must be called with
// JvmtiThreadState_lock held (or before threads exist).
void
JvmtiEnvBase::record_first_time_class_file_load_hook_enabled() {
  assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(),
         "sanity check");

  if (!_class_file_load_hook_ever_enabled) {
    _class_file_load_hook_ever_enabled = true;

    if (get_capabilities()->can_retransform_classes) {
      _is_retransformable = true;
    } else {
      _is_retransformable = false;

      // cannot add retransform
capability after ClassFileLoadHook has been enabled 367 get_prohibited_capabilities()->can_retransform_classes = 1; 368 } 369 } 370 } 371 372 373 void 374 JvmtiEnvBase::record_class_file_load_hook_enabled() { 375 if (!_class_file_load_hook_ever_enabled) { 376 if (Threads::number_of_threads() == 0) { 377 record_first_time_class_file_load_hook_enabled(); 378 } else { 379 MutexLocker mu(JvmtiThreadState_lock); 380 record_first_time_class_file_load_hook_enabled(); 381 } 382 } 383 } 384 385 386 jvmtiError 387 JvmtiEnvBase::set_native_method_prefixes(jint prefix_count, char** prefixes) { 388 assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), 389 "sanity check"); 390 391 int old_prefix_count = get_native_method_prefix_count(); 392 char **old_prefixes = get_native_method_prefixes(); 393 394 // allocate and install the new prefixex 395 if (prefix_count == 0 || !is_valid()) { 396 _native_method_prefix_count = 0; 397 _native_method_prefixes = NULL; 398 } else { 399 // there are prefixes, allocate an array to hold them, and fill it 400 char** new_prefixes = (char**)os::malloc((prefix_count) * sizeof(char*), mtInternal); 401 if (new_prefixes == NULL) { 402 return JVMTI_ERROR_OUT_OF_MEMORY; 403 } 404 for (int i = 0; i < prefix_count; i++) { 405 char* prefix = prefixes[i]; 406 if (prefix == NULL) { 407 for (int j = 0; j < (i-1); j++) { 408 os::free(new_prefixes[j]); 409 } 410 os::free(new_prefixes); 411 return JVMTI_ERROR_NULL_POINTER; 412 } 413 prefix = os::strdup(prefixes[i]); 414 if (prefix == NULL) { 415 for (int j = 0; j < (i-1); j++) { 416 os::free(new_prefixes[j]); 417 } 418 os::free(new_prefixes); 419 return JVMTI_ERROR_OUT_OF_MEMORY; 420 } 421 new_prefixes[i] = prefix; 422 } 423 _native_method_prefix_count = prefix_count; 424 _native_method_prefixes = new_prefixes; 425 } 426 427 // now that we know the new prefixes have been successfully installed we can 428 // safely remove the old ones 429 if (old_prefix_count != 0) { 430 for (int i = 0; i 
< old_prefix_count; i++) { 431 os::free(old_prefixes[i]); 432 } 433 os::free(old_prefixes); 434 } 435 436 return JVMTI_ERROR_NONE; 437 } 438 439 440 // Collect all the prefixes which have been set in any JVM TI environments 441 // by the SetNativeMethodPrefix(es) functions. Be sure to maintain the 442 // order of environments and the order of prefixes within each environment. 443 // Return in a resource allocated array. 444 char** 445 JvmtiEnvBase::get_all_native_method_prefixes(int* count_ptr) { 446 assert(Threads::number_of_threads() == 0 || 447 SafepointSynchronize::is_at_safepoint() || 448 JvmtiThreadState_lock->is_locked(), 449 "sanity check"); 450 451 int total_count = 0; 452 GrowableArray<char*>* prefix_array =new GrowableArray<char*>(5); 453 454 JvmtiEnvIterator it; 455 for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) { 456 int prefix_count = env->get_native_method_prefix_count(); 457 char** prefixes = env->get_native_method_prefixes(); 458 for (int j = 0; j < prefix_count; j++) { 459 // retrieve a prefix and so that it is safe against asynchronous changes 460 // copy it into the resource area 461 char* prefix = prefixes[j]; 462 char* prefix_copy = NEW_RESOURCE_ARRAY(char, strlen(prefix)+1); 463 strcpy(prefix_copy, prefix); 464 prefix_array->at_put_grow(total_count++, prefix_copy); 465 } 466 } 467 468 char** all_prefixes = NEW_RESOURCE_ARRAY(char*, total_count); 469 char** p = all_prefixes; 470 for (int i = 0; i < total_count; ++i) { 471 *p++ = prefix_array->at(i); 472 } 473 *count_ptr = total_count; 474 return all_prefixes; 475 } 476 477 void 478 JvmtiEnvBase::set_event_callbacks(const jvmtiEventCallbacks* callbacks, 479 jint size_of_callbacks) { 480 assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check"); 481 482 size_t byte_cnt = sizeof(jvmtiEventCallbacks); 483 484 // clear in either case to be sure we got any gap between sizes 485 memset(&_event_callbacks, 0, byte_cnt); 486 487 // Now that 
// JvmtiThreadState_lock is held, prevent a possible race condition where events
// are re-enabled by a call to set event callbacks where the DisposeEnvironment
// occurs after the boiler-plate environment check and before the lock is acquired.
  if (callbacks != NULL && is_valid()) {
    // Copy at most the caller-declared size, so agents built against an
    // older (smaller) jvmtiEventCallbacks still work.
    if (size_of_callbacks < (jint)byte_cnt) {
      byte_cnt = size_of_callbacks;
    }
    memcpy(&_event_callbacks, callbacks, byte_cnt);
  }
}


// In the fullness of time, all users of the method should instead
// directly use allocate, besides being cleaner and faster, this will
// mean much better out of memory handling
unsigned char *
JvmtiEnvBase::jvmtiMalloc(jlong size) {
  unsigned char* mem = NULL;
  jvmtiError result = allocate(size, &mem);
  assert(result == JVMTI_ERROR_NONE, "Allocate failed");
  return mem;
}


// Handle management
//
// These helpers convert VM Handles to JNI local references (and back out),
// which is how JVMTI returns object values to agents.

jobject JvmtiEnvBase::jni_reference(Handle hndl) {
  return JNIHandles::make_local(hndl());
}

// Create a local reference in the context of an explicitly given thread.
jobject JvmtiEnvBase::jni_reference(JavaThread *thread, Handle hndl) {
  return JNIHandles::make_local(thread, hndl());
}

void JvmtiEnvBase::destroy_jni_reference(jobject jobj) {
  JNIHandles::destroy_local(jobj);
}

void JvmtiEnvBase::destroy_jni_reference(JavaThread *thread, jobject jobj) {
  JNIHandles::destroy_local(jobj); // thread is unused.
}

//
// Threads
//

// Allocate (via jvmtiMalloc) an array of JNI local references for the given
// Handles. Returns NULL for length == 0 or on allocation failure.
jobject *
JvmtiEnvBase::new_jobjectArray(int length, Handle *handles) {
  if (length == 0) {
    return NULL;
  }

  jobject *objArray = (jobject *) jvmtiMalloc(sizeof(jobject) * length);
  NULL_CHECK(objArray, NULL);

  for (int i=0; i<length; i++) {
    objArray[i] = jni_reference(handles[i]);
  }
  return objArray;
}

jthread *
JvmtiEnvBase::new_jthreadArray(int length, Handle *handles) {
  return (jthread *) new_jobjectArray(length,handles);
}

jthreadGroup *
JvmtiEnvBase::new_jthreadGroupArray(int length, Handle *handles) {
  return (jthreadGroup *) new_jobjectArray(length,handles);
}

// return the vframe on the specified thread and depth, NULL if no such frame
vframe*
JvmtiEnvBase::vframeFor(JavaThread* java_thread, jint depth) {
  if (!java_thread->has_last_Java_frame()) {
    return NULL;
  }
  RegisterMap reg_map(java_thread);
  vframe *vf = java_thread->last_java_vframe(&reg_map);
  int d = 0;
  // Walk 'depth' callers up the Java stack.
  while ((vf != NULL) && (d < depth)) {
    vf = vf->java_sender();
    d++;
  }
  return vf;
}


//
// utilities: JNI objects
//


// Return a JNI local reference for the java.lang.Class mirror of k.
jclass
JvmtiEnvBase::get_jni_class_non_null(Klass* k) {
  assert(k != NULL, "k != NULL");
  Thread *thread = Thread::current();
  return (jclass)jni_reference(Handle(thread, k->java_mirror()));
}

//
// Field Information
//

// Decode a jfieldID (static or instance) into a fieldDescriptor.
// Returns false if the jfieldID is not valid for klass k.
bool
JvmtiEnvBase::get_field_descriptor(Klass* k, jfieldID field, fieldDescriptor* fd) {
  if (!jfieldIDWorkaround::is_valid_jfieldID(k, field)) {
    return false;
  }
  bool found = false;
  if (jfieldIDWorkaround::is_static_jfieldID(field)) {
    JNIid* id = jfieldIDWorkaround::from_static_jfieldID(field);
    found = id->find_local_field(fd);
  } else {
    // Non-static field. The fieldID is really the offset of the field within the object.
    int offset = jfieldIDWorkaround::from_instance_jfieldID(k, field);
    found = InstanceKlass::cast(k)->find_field_from_offset(offset, false, fd);
  }
  return found;
}

//
// Object Monitor Information
//

//
// Count the number of objects for a lightweight monitor. The hobj
// parameter is object that owns the monitor so this routine will
// count the number of times the same object was locked by frames
// in java_thread.
//
jint
JvmtiEnvBase::count_locked_objects(JavaThread *java_thread, Handle hobj) {
  jint ret = 0;
  if (!java_thread->has_last_Java_frame()) {
    return ret;  // no Java frames so no monitors
  }

  Thread* current_thread = Thread::current();
  ResourceMark rm(current_thread);
  HandleMark hm(current_thread);
  RegisterMap reg_map(java_thread);

  // Walk every Java frame and count monitors whose owner is hobj.
  for(javaVFrame *jvf=java_thread->last_java_vframe(&reg_map); jvf != NULL;
      jvf = jvf->java_sender()) {
    GrowableArray<MonitorInfo*>* mons = jvf->monitors();
    if (!mons->is_empty()) {
      for (int i = 0; i < mons->length(); i++) {
        MonitorInfo *mi = mons->at(i);
        // Scalar-replaced monitors have no real object; skip them.
        if (mi->owner_is_scalar_replaced()) continue;

        // see if owner of the monitor is our object
        if (mi->owner() != NULL && mi->owner() == hobj()) {
          ret++;
        }
      }
    }
  }
  return ret;
}



// Report the monitor java_thread is currently blocked on (enter or wait),
// as a JNI local reference created for calling_thread; NULL if none.
jvmtiError
JvmtiEnvBase::get_current_contended_monitor(JavaThread *calling_thread, JavaThread *java_thread, jobject *monitor_ptr) {
  JavaThread *current_jt = JavaThread::current();
  assert(current_jt == java_thread ||
         current_jt == java_thread->active_handshaker(),
         "call by myself or at direct handshake");
  oop obj = NULL;
  // The ObjectMonitor* can't be async deflated since we are either
  // at a safepoint or the calling thread is operating on itself so
  // it cannot leave the underlying wait()/enter() call.
  ObjectMonitor *mon = java_thread->current_waiting_monitor();
  if (mon == NULL) {
    // thread is not doing an Object.wait() call
    mon = java_thread->current_pending_monitor();
    if (mon != NULL) {
      // The thread is trying to enter() an ObjectMonitor.
      obj = (oop)mon->object();
      assert(obj != NULL, "ObjectMonitor should have a valid object!");
    }
    // implied else: no contended ObjectMonitor
  } else {
    // thread is doing an Object.wait() call
    obj = (oop)mon->object();
    assert(obj != NULL, "Object.wait() should have an object");
  }

  if (obj == NULL) {
    *monitor_ptr = NULL;
  } else {
    HandleMark hm(current_jt);
    Handle hobj(current_jt, obj);
    // Return a JNI local reference created for the calling thread.
    *monitor_ptr = jni_reference(calling_thread, hobj);
  }
  return JVMTI_ERROR_NONE;
}


// Collect all monitors owned by java_thread into owned_monitors_list:
// stack-locked monitors frame by frame, then off-stack monitors
// (e.g. JNI MonitorEnter) via ObjectSynchronizer.
jvmtiError
JvmtiEnvBase::get_owned_monitors(JavaThread *calling_thread, JavaThread* java_thread,
                                 GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list) {
  jvmtiError err = JVMTI_ERROR_NONE;
  JavaThread *current_jt = JavaThread::current();
  assert(current_jt == java_thread ||
         current_jt == java_thread->active_handshaker(),
         "call by myself or at direct handshake");

  if (java_thread->has_last_Java_frame()) {
    ResourceMark rm(current_jt);
    HandleMark hm(current_jt);
    RegisterMap reg_map(java_thread);

    int depth = 0;
    for (javaVFrame *jvf = java_thread->last_java_vframe(&reg_map); jvf != NULL;
         jvf = jvf->java_sender()) {
      if (MaxJavaStackTraceDepth == 0 || depth++ < MaxJavaStackTraceDepth) {  // check for stack too deep
        // add locked objects for this frame into list
        err = get_locked_objects_in_frame(calling_thread, java_thread, jvf, owned_monitors_list, depth-1);
        if (err != JVMTI_ERROR_NONE) {
          return err;
        }
      }
    }
  }

  // Get off stack monitors. (e.g. acquired via jni MonitorEnter).
  JvmtiMonitorClosure jmc(java_thread, calling_thread, owned_monitors_list, this);
  ObjectSynchronizer::monitors_iterate(&jmc);
  err = jmc.error();

  return err;
}

// Save JNI local handles for any objects that this frame owns.
jvmtiError
JvmtiEnvBase::get_locked_objects_in_frame(JavaThread* calling_thread, JavaThread* java_thread,
                                 javaVFrame *jvf, GrowableArray<jvmtiMonitorStackDepthInfo*>* owned_monitors_list, jint stack_depth) {
  jvmtiError err = JVMTI_ERROR_NONE;
  Thread* current_thread = Thread::current();
  ResourceMark rm(current_thread);
  HandleMark hm(current_thread);

  GrowableArray<MonitorInfo*>* mons = jvf->monitors();
  if (mons->is_empty()) {
    return err;  // this javaVFrame holds no monitors
  }

  oop wait_obj = NULL;
  {
    // The ObjectMonitor* can't be async deflated since we are either
    // at a safepoint or the calling thread is operating on itself so
    // it cannot leave the underlying wait() call.
    // Save object of current wait() call (if any) for later comparison.
    ObjectMonitor *mon = java_thread->current_waiting_monitor();
    if (mon != NULL) {
      wait_obj = (oop)mon->object();
    }
  }
  oop pending_obj = NULL;
  {
    // The ObjectMonitor* can't be async deflated since we are either
    // at a safepoint or the calling thread is operating on itself so
    // it cannot leave the underlying enter() call.
    // Save object of current enter() call (if any) for later comparison.
753 ObjectMonitor *mon = java_thread->current_pending_monitor(); 754 if (mon != NULL) { 755 pending_obj = (oop)mon->object(); 756 } 757 } 758 759 for (int i = 0; i < mons->length(); i++) { 760 MonitorInfo *mi = mons->at(i); 761 762 if (mi->owner_is_scalar_replaced()) continue; 763 764 oop obj = mi->owner(); 765 if (obj == NULL) { 766 // this monitor doesn't have an owning object so skip it 767 continue; 768 } 769 770 if (wait_obj == obj) { 771 // the thread is waiting on this monitor so it isn't really owned 772 continue; 773 } 774 775 if (pending_obj == obj) { 776 // the thread is pending on this monitor so it isn't really owned 777 continue; 778 } 779 780 if (owned_monitors_list->length() > 0) { 781 // Our list has at least one object on it so we have to check 782 // for recursive object locking 783 bool found = false; 784 for (int j = 0; j < owned_monitors_list->length(); j++) { 785 jobject jobj = ((jvmtiMonitorStackDepthInfo*)owned_monitors_list->at(j))->monitor; 786 oop check = JNIHandles::resolve(jobj); 787 if (check == obj) { 788 found = true; // we found the object 789 break; 790 } 791 } 792 793 if (found) { 794 // already have this object so don't include it 795 continue; 796 } 797 } 798 799 // add the owning object to our list 800 jvmtiMonitorStackDepthInfo *jmsdi; 801 err = allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsigned char **)&jmsdi); 802 if (err != JVMTI_ERROR_NONE) { 803 return err; 804 } 805 Handle hobj(Thread::current(), obj); 806 jmsdi->monitor = jni_reference(calling_thread, hobj); 807 jmsdi->stack_depth = stack_depth; 808 owned_monitors_list->append(jmsdi); 809 } 810 811 return err; 812 } 813 814 jvmtiError 815 JvmtiEnvBase::get_stack_trace(JavaThread *java_thread, 816 jint start_depth, jint max_count, 817 jvmtiFrameInfo* frame_buffer, jint* count_ptr) { 818 #ifdef ASSERT 819 uint32_t debug_bits = 0; 820 #endif 821 Thread *current_thread = Thread::current(); 822 assert(current_thread == java_thread || 823 
SafepointSynchronize::is_at_safepoint() || 824 current_thread == java_thread->active_handshaker(), 825 "call by myself / at safepoint / at handshake"); 826 int count = 0; 827 if (java_thread->has_last_Java_frame()) { 828 RegisterMap reg_map(java_thread); 829 ResourceMark rm(current_thread); 830 javaVFrame *jvf = java_thread->last_java_vframe(®_map); 831 HandleMark hm(current_thread); 832 if (start_depth != 0) { 833 if (start_depth > 0) { 834 for (int j = 0; j < start_depth && jvf != NULL; j++) { 835 jvf = jvf->java_sender(); 836 } 837 if (jvf == NULL) { 838 // start_depth is deeper than the stack depth 839 return JVMTI_ERROR_ILLEGAL_ARGUMENT; 840 } 841 } else { // start_depth < 0 842 // we are referencing the starting depth based on the oldest 843 // part of the stack. 844 // optimize to limit the number of times that java_sender() is called 845 javaVFrame *jvf_cursor = jvf; 846 javaVFrame *jvf_prev = NULL; 847 javaVFrame *jvf_prev_prev = NULL; 848 int j = 0; 849 while (jvf_cursor != NULL) { 850 jvf_prev_prev = jvf_prev; 851 jvf_prev = jvf_cursor; 852 for (j = 0; j > start_depth && jvf_cursor != NULL; j--) { 853 jvf_cursor = jvf_cursor->java_sender(); 854 } 855 } 856 if (j == start_depth) { 857 // previous pointer is exactly where we want to start 858 jvf = jvf_prev; 859 } else { 860 // we need to back up further to get to the right place 861 if (jvf_prev_prev == NULL) { 862 // the -start_depth is greater than the stack depth 863 return JVMTI_ERROR_ILLEGAL_ARGUMENT; 864 } 865 // j now is the number of frames on the stack starting with 866 // jvf_prev, we start from jvf_prev_prev and move older on 867 // the stack that many, the result is -start_depth frames 868 // remaining. 869 jvf = jvf_prev_prev; 870 for (; j < 0; j++) { 871 jvf = jvf->java_sender(); 872 } 873 } 874 } 875 } 876 for (; count < max_count && jvf != NULL; count++) { 877 frame_buffer[count].method = jvf->method()->jmethod_id(); 878 frame_buffer[count].location = (jvf->method()->is_native() ? 
-1 : jvf->bci()); 879 jvf = jvf->java_sender(); 880 } 881 } else { 882 if (start_depth != 0) { 883 // no frames and there is a starting depth 884 return JVMTI_ERROR_ILLEGAL_ARGUMENT; 885 } 886 } 887 *count_ptr = count; 888 return JVMTI_ERROR_NONE; 889 } 890 891 jvmtiError 892 JvmtiEnvBase::get_frame_count(JvmtiThreadState *state, jint *count_ptr) { 893 assert((state != NULL), 894 "JavaThread should create JvmtiThreadState before calling this method"); 895 *count_ptr = state->count_frames(); 896 return JVMTI_ERROR_NONE; 897 } 898 899 jvmtiError 900 JvmtiEnvBase::get_frame_location(JavaThread *java_thread, jint depth, 901 jmethodID* method_ptr, jlocation* location_ptr) { 902 #ifdef ASSERT 903 uint32_t debug_bits = 0; 904 #endif 905 Thread* current_thread = Thread::current(); 906 assert(current_thread == java_thread || 907 current_thread == java_thread->active_handshaker(), 908 "call by myself or at direct handshake"); 909 ResourceMark rm(current_thread); 910 911 vframe *vf = vframeFor(java_thread, depth); 912 if (vf == NULL) { 913 return JVMTI_ERROR_NO_MORE_FRAMES; 914 } 915 916 // vframeFor should return a java frame. If it doesn't 917 // it means we've got an internal error and we return the 918 // error in product mode. In debug mode we will instead 919 // attempt to cast the vframe to a javaVFrame and will 920 // cause an assertion/crash to allow further diagnosis. 
921 #ifdef PRODUCT 922 if (!vf->is_java_frame()) { 923 return JVMTI_ERROR_INTERNAL; 924 } 925 #endif 926 927 HandleMark hm(current_thread); 928 javaVFrame *jvf = javaVFrame::cast(vf); 929 Method* method = jvf->method(); 930 if (method->is_native()) { 931 *location_ptr = -1; 932 } else { 933 *location_ptr = jvf->bci(); 934 } 935 *method_ptr = method->jmethod_id(); 936 937 return JVMTI_ERROR_NONE; 938 } 939 940 941 jvmtiError 942 JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject object, jvmtiMonitorUsage* info_ptr) { 943 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 944 Thread* current_thread = VMThread::vm_thread(); 945 assert(current_thread == Thread::current(), "must be"); 946 947 HandleMark hm(current_thread); 948 Handle hobj; 949 950 // Check arguments 951 { 952 oop mirror = JNIHandles::resolve_external_guard(object); 953 NULL_CHECK(mirror, JVMTI_ERROR_INVALID_OBJECT); 954 NULL_CHECK(info_ptr, JVMTI_ERROR_NULL_POINTER); 955 956 hobj = Handle(current_thread, mirror); 957 } 958 959 ThreadsListHandle tlh(current_thread); 960 JavaThread *owning_thread = NULL; 961 ObjectMonitor *mon = NULL; 962 jvmtiMonitorUsage ret = { 963 NULL, 0, 0, NULL, 0, NULL 964 }; 965 966 uint32_t debug_bits = 0; 967 // first derive the object's owner and entry_count (if any) 968 { 969 // Revoke any biases before querying the mark word 970 BiasedLocking::revoke_at_safepoint(hobj); 971 972 address owner = NULL; 973 { 974 markWord mark = hobj()->mark(); 975 976 if (!mark.has_monitor()) { 977 // this object has a lightweight monitor 978 979 if (mark.has_locker()) { 980 owner = (address)mark.locker(); // save the address of the Lock word 981 } 982 // implied else: no owner 983 } else { 984 // this object has a heavyweight monitor 985 mon = mark.monitor(); 986 987 // The owner field of a heavyweight monitor may be NULL for no 988 // owner, a JavaThread * or it may still be the address of the 989 // Lock word in a JavaThread's stack. 
A monitor can be inflated 990 // by a non-owning JavaThread, but only the owning JavaThread 991 // can change the owner field from the Lock word to the 992 // JavaThread * and it may not have done that yet. 993 owner = (address)mon->owner(); 994 } 995 } 996 997 if (owner != NULL) { 998 // This monitor is owned so we have to find the owning JavaThread. 999 owning_thread = Threads::owning_thread_from_monitor_owner(tlh.list(), owner); 1000 assert(owning_thread != NULL, "owning JavaThread must not be NULL"); 1001 Handle th(current_thread, owning_thread->threadObj()); 1002 ret.owner = (jthread)jni_reference(calling_thread, th); 1003 } 1004 1005 if (owning_thread != NULL) { // monitor is owned 1006 // The recursions field of a monitor does not reflect recursions 1007 // as lightweight locks before inflating the monitor are not included. 1008 // We have to count the number of recursive monitor entries the hard way. 1009 // We pass a handle to survive any GCs along the way. 1010 ret.entry_count = count_locked_objects(owning_thread, hobj); 1011 } 1012 // implied else: entry_count == 0 1013 } 1014 1015 jint nWant = 0, nWait = 0; 1016 if (mon != NULL) { 1017 // this object has a heavyweight monitor 1018 nWant = mon->contentions(); // # of threads contending for monitor 1019 nWait = mon->waiters(); // # of threads in Object.wait() 1020 ret.waiter_count = nWant + nWait; 1021 ret.notify_waiter_count = nWait; 1022 } else { 1023 // this object has a lightweight monitor 1024 ret.waiter_count = 0; 1025 ret.notify_waiter_count = 0; 1026 } 1027 1028 // Allocate memory for heavyweight and lightweight monitor. 
1029 jvmtiError err; 1030 err = allocate(ret.waiter_count * sizeof(jthread *), (unsigned char**)&ret.waiters); 1031 if (err != JVMTI_ERROR_NONE) { 1032 return err; 1033 } 1034 err = allocate(ret.notify_waiter_count * sizeof(jthread *), 1035 (unsigned char**)&ret.notify_waiters); 1036 if (err != JVMTI_ERROR_NONE) { 1037 deallocate((unsigned char*)ret.waiters); 1038 return err; 1039 } 1040 1041 // now derive the rest of the fields 1042 if (mon != NULL) { 1043 // this object has a heavyweight monitor 1044 1045 // Number of waiters may actually be less than the waiter count. 1046 // So NULL out memory so that unused memory will be NULL. 1047 memset(ret.waiters, 0, ret.waiter_count * sizeof(jthread *)); 1048 memset(ret.notify_waiters, 0, ret.notify_waiter_count * sizeof(jthread *)); 1049 1050 if (ret.waiter_count > 0) { 1051 // we have contending and/or waiting threads 1052 if (nWant > 0) { 1053 // we have contending threads 1054 ResourceMark rm(current_thread); 1055 // get_pending_threads returns only java thread so we do not need to 1056 // check for non java threads. 
1057 GrowableArray<JavaThread*>* wantList = Threads::get_pending_threads(tlh.list(), nWant, (address)mon); 1058 if (wantList->length() < nWant) { 1059 // robustness: the pending list has gotten smaller 1060 nWant = wantList->length(); 1061 } 1062 for (int i = 0; i < nWant; i++) { 1063 JavaThread *pending_thread = wantList->at(i); 1064 Handle th(current_thread, pending_thread->threadObj()); 1065 ret.waiters[i] = (jthread)jni_reference(calling_thread, th); 1066 } 1067 } 1068 if (nWait > 0) { 1069 // we have threads in Object.wait() 1070 int offset = nWant; // add after any contending threads 1071 ObjectWaiter *waiter = mon->first_waiter(); 1072 for (int i = 0, j = 0; i < nWait; i++) { 1073 if (waiter == NULL) { 1074 // robustness: the waiting list has gotten smaller 1075 nWait = j; 1076 break; 1077 } 1078 Thread *t = mon->thread_of_waiter(waiter); 1079 if (t != NULL && t->is_Java_thread()) { 1080 JavaThread *wjava_thread = (JavaThread *)t; 1081 // If the thread was found on the ObjectWaiter list, then 1082 // it has not been notified. This thread can't change the 1083 // state of the monitor so it doesn't need to be suspended. 1084 Handle th(current_thread, wjava_thread->threadObj()); 1085 ret.waiters[offset + j] = (jthread)jni_reference(calling_thread, th); 1086 ret.notify_waiters[j++] = (jthread)jni_reference(calling_thread, th); 1087 } 1088 waiter = mon->next_waiter(waiter); 1089 } 1090 } 1091 } // ThreadsListHandle is destroyed here. 1092 1093 // Adjust count. nWant and nWait count values may be less than original. 1094 ret.waiter_count = nWant + nWait; 1095 ret.notify_waiter_count = nWait; 1096 } else { 1097 // this object has a lightweight monitor and we have nothing more 1098 // to do here because the defaults are just fine. 
1099 } 1100 1101 // we don't update return parameter unless everything worked 1102 *info_ptr = ret; 1103 1104 return JVMTI_ERROR_NONE; 1105 } 1106 1107 ResourceTracker::ResourceTracker(JvmtiEnv* env) { 1108 _env = env; 1109 _allocations = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<unsigned char*>(20, mtServiceability); 1110 _failed = false; 1111 } 1112 ResourceTracker::~ResourceTracker() { 1113 if (_failed) { 1114 for (int i=0; i<_allocations->length(); i++) { 1115 _env->deallocate(_allocations->at(i)); 1116 } 1117 } 1118 delete _allocations; 1119 } 1120 1121 jvmtiError ResourceTracker::allocate(jlong size, unsigned char** mem_ptr) { 1122 unsigned char *ptr; 1123 jvmtiError err = _env->allocate(size, &ptr); 1124 if (err == JVMTI_ERROR_NONE) { 1125 _allocations->append(ptr); 1126 *mem_ptr = ptr; 1127 } else { 1128 *mem_ptr = NULL; 1129 _failed = true; 1130 } 1131 return err; 1132 } 1133 1134 unsigned char* ResourceTracker::allocate(jlong size) { 1135 unsigned char* ptr; 1136 allocate(size, &ptr); 1137 return ptr; 1138 } 1139 1140 char* ResourceTracker::strdup(const char* str) { 1141 char *dup_str = (char*)allocate(strlen(str)+1); 1142 if (dup_str != NULL) { 1143 strcpy(dup_str, str); 1144 } 1145 return dup_str; 1146 } 1147 1148 struct StackInfoNode { 1149 struct StackInfoNode *next; 1150 jvmtiStackInfo info; 1151 }; 1152 1153 // Create a jvmtiStackInfo inside a linked list node and create a 1154 // buffer for the frame information, both allocated as resource objects. 1155 // Fill in both the jvmtiStackInfo and the jvmtiFrameInfo. 1156 // Note that either or both of thr and thread_oop 1157 // may be null if the thread is new or has exited. 
void
MultipleStackTracesCollector::fill_frames(jthread jt, JavaThread *thr, oop thread_oop) {
#ifdef ASSERT
  Thread *current_thread = Thread::current();
  assert(current_thread == thr ||
         SafepointSynchronize::is_at_safepoint() ||
         current_thread == thr->active_handshaker(),
         "call by myself / at safepoint / at handshake");
#endif

  jint state = 0;
  struct StackInfoNode *node = NEW_RESOURCE_OBJ(struct StackInfoNode);
  jvmtiStackInfo *infop = &(node->info);
  // Prepend to the collector's list; the order is reversed again when the
  // final buffer is filled in allocate_and_fill_stacks().
  node->next = head();
  set_head(node);
  infop->frame_count = 0;
  infop->thread = jt;

  if (thread_oop != NULL) {
    // get most state bits
    state = (jint)java_lang_Thread::get_thread_status(thread_oop);
  }

  if (thr != NULL) {    // add more state bits if there is a JavaThread to query
    // same as is_being_ext_suspended() but without locking
    if (thr->is_ext_suspended() || thr->is_external_suspend()) {
      state |= JVMTI_THREAD_STATE_SUSPENDED;
    }
    JavaThreadState jts = thr->thread_state();
    if (jts == _thread_in_native) {
      state |= JVMTI_THREAD_STATE_IN_NATIVE;
    }
    if (thr->is_interrupted(false)) {
      state |= JVMTI_THREAD_STATE_INTERRUPTED;
    }
  }
  infop->state = state;

  if (thr != NULL && (state & JVMTI_THREAD_STATE_ALIVE) != 0) {
    // Alive thread with a JavaThread: collect up to max_frame_count() frames
    // into a resource-allocated buffer.
    infop->frame_buffer = NEW_RESOURCE_ARRAY(jvmtiFrameInfo, max_frame_count());
    env()->get_stack_trace(thr, 0, max_frame_count(),
                           infop->frame_buffer, &(infop->frame_count));
  } else {
    // New or exited thread: report an empty trace.
    infop->frame_buffer = NULL;
    infop->frame_count = 0;
  }
  _frame_count_total += infop->frame_count;
}

// Based on the stack information in the linked list, allocate memory
// block to return and fill it from the info in the linked list.
void
MultipleStackTracesCollector::allocate_and_fill_stacks(jint thread_count) {
  // do I need to worry about alignment issues?
  // Single allocation holds all jvmtiStackInfo records followed by all
  // jvmtiFrameInfo records.
  jlong alloc_size = thread_count * sizeof(jvmtiStackInfo)
                   + _frame_count_total * sizeof(jvmtiFrameInfo);
  env()->allocate(alloc_size, (unsigned char **)&_stack_info);

  // pointers to move through the newly allocated space as it is filled in
  jvmtiStackInfo *si = _stack_info + thread_count;  // bottom of stack info
  jvmtiFrameInfo *fi = (jvmtiFrameInfo *)si;        // is the top of frame info

  // copy information in resource area into allocated buffer
  // insert stack info backwards since linked list is backwards
  // insert frame info forwards
  // walk the StackInfoNodes
  for (struct StackInfoNode *sin = head(); sin != NULL; sin = sin->next) {
    jint frame_count = sin->info.frame_count;
    size_t frames_size = frame_count * sizeof(jvmtiFrameInfo);
    --si;
    memcpy(si, &(sin->info), sizeof(jvmtiStackInfo));
    if (frames_size == 0) {
      si->frame_buffer = NULL;
    } else {
      memcpy(fi, sin->info.frame_buffer, frames_size);
      si->frame_buffer = fi;  // point to the new allocated copy of the frames
      fi += frame_count;
    }
  }
  // si walked down to the start and fi walked up to the end of the buffer.
  assert(si == _stack_info, "the last copied stack info must be the first record");
  assert((unsigned char *)fi == ((unsigned char *)_stack_info) + alloc_size,
         "the last copied frame info must be the last record");
}


// Safepoint operation: collect stack traces for an explicit list of threads.
void
VM_GetThreadListStackTraces::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ResourceMark rm;
  ThreadsListHandle tlh;
  for (int i = 0; i < _thread_count; ++i) {
    jthread jt = _thread_list[i];
    JavaThread* java_thread = NULL;
    oop thread_oop = NULL;
    jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), jt, &java_thread, &thread_oop);
    if (err != JVMTI_ERROR_NONE) {
      // We got an error code so we don't have a JavaThread *, but
      // only return an error from here if we didn't get a valid
      // thread_oop.
      if (thread_oop == NULL) {
        _collector.set_result(err);
        return;
      }
      // We have a valid thread_oop.
    }
    _collector.fill_frames(jt, java_thread, thread_oop);
  }
  _collector.allocate_and_fill_stacks(_thread_count);
}

// Collect the stack trace of one target thread (runs against the target
// via do_thread); skipped if the thread is exiting or has no thread oop.
void
GetSingleStackTraceClosure::do_thread(Thread *target) {
  assert(target->is_Java_thread(), "just checking");
  JavaThread *jt = (JavaThread *)target;
  oop thread_oop = jt->threadObj();

  if (!jt->is_exiting() && thread_oop != NULL) {
    ResourceMark rm;
    _collector.fill_frames(_jthread, jt, thread_oop);
    _collector.allocate_and_fill_stacks(1);
  }
}

// Safepoint operation: collect stack traces for all live, externally
// visible Java threads.
void
VM_GetAllStackTraces::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ResourceMark rm;
  _final_thread_count = 0;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    oop thread_oop = jt->threadObj();
    if (thread_oop != NULL &&
        !jt->is_exiting() &&
        java_lang_Thread::is_alive(thread_oop) &&
        !jt->is_hidden_from_external_view()) {
      ++_final_thread_count;
      // Handle block of the calling thread is used to create local refs.
      _collector.fill_frames((jthread)JNIHandles::make_local(_calling_thread, thread_oop),
                             jt, thread_oop);
    }
  }
  _collector.allocate_and_fill_stacks(_final_thread_count);
}

// Verifies that the top frame is a java frame in an expected state.
// Deoptimizes frame if needed.
// Checks that the frame method signature matches the return type (tos).
// HandleMark must be defined in the caller only.
// It is to keep a ret_ob_h handle alive after return to the caller.
1308 jvmtiError 1309 JvmtiEnvBase::check_top_frame(JavaThread* current_thread, JavaThread* java_thread, 1310 jvalue value, TosState tos, Handle* ret_ob_h) { 1311 ResourceMark rm(current_thread); 1312 1313 vframe *vf = vframeFor(java_thread, 0); 1314 NULL_CHECK(vf, JVMTI_ERROR_NO_MORE_FRAMES); 1315 1316 javaVFrame *jvf = (javaVFrame*) vf; 1317 if (!vf->is_java_frame() || jvf->method()->is_native()) { 1318 return JVMTI_ERROR_OPAQUE_FRAME; 1319 } 1320 1321 // If the frame is a compiled one, need to deoptimize it. 1322 if (vf->is_compiled_frame()) { 1323 if (!vf->fr().can_be_deoptimized()) { 1324 return JVMTI_ERROR_OPAQUE_FRAME; 1325 } 1326 Deoptimization::deoptimize_frame(java_thread, jvf->fr().id()); 1327 } 1328 1329 // Get information about method return type 1330 Symbol* signature = jvf->method()->signature(); 1331 1332 ResultTypeFinder rtf(signature); 1333 TosState fr_tos = as_TosState(rtf.type()); 1334 if (fr_tos != tos) { 1335 if (tos != itos || (fr_tos != btos && fr_tos != ztos && fr_tos != ctos && fr_tos != stos)) { 1336 return JVMTI_ERROR_TYPE_MISMATCH; 1337 } 1338 } 1339 1340 // Check that the jobject class matches the return type signature. 1341 jobject jobj = value.l; 1342 if (tos == atos && jobj != NULL) { // NULL reference is allowed 1343 Handle ob_h(current_thread, JNIHandles::resolve_external_guard(jobj)); 1344 NULL_CHECK(ob_h, JVMTI_ERROR_INVALID_OBJECT); 1345 Klass* ob_k = ob_h()->klass(); 1346 NULL_CHECK(ob_k, JVMTI_ERROR_INVALID_OBJECT); 1347 1348 // Method return type signature. 1349 char* ty_sign = 1 + strchr(signature->as_C_string(), JVM_SIGNATURE_ENDFUNC); 1350 1351 if (!VM_GetOrSetLocal::is_assignable(ty_sign, ob_k, current_thread)) { 1352 return JVMTI_ERROR_TYPE_MISMATCH; 1353 } 1354 *ret_ob_h = ob_h; 1355 } 1356 return JVMTI_ERROR_NONE; 1357 } /* end check_top_frame */ 1358 1359 1360 // ForceEarlyReturn<type> follows the PopFrame approach in many aspects. 1361 // Main difference is on the last stage in the interpreter. 
// The PopFrame stops method execution to continue execution
// from the same method call instruction.
// The ForceEarlyReturn forces return from method so the execution
// continues at the bytecode following the method call.

// Threads_lock NOT held, java_thread not protected by lock
// java_thread - pre-checked

// Implements the JVMTI ForceEarlyReturn<Type> family: records in the
// JvmtiThreadState that the suspended thread's top frame must return
// 'value' (interpreted per 'tos') when the thread resumes.
jvmtiError
JvmtiEnvBase::force_early_return(JavaThread* java_thread, jvalue value, TosState tos) {
  JavaThread* current_thread = JavaThread::current();
  HandleMark hm(current_thread);
  uint32_t debug_bits = 0;

  // retrieve or create the state
  JvmtiThreadState* state = JvmtiThreadState::state_for(java_thread);
  if (state == NULL) {
    return JVMTI_ERROR_THREAD_NOT_ALIVE;
  }

  // Check if java_thread is fully suspended
  if (!java_thread->is_thread_fully_suspended(true /* wait for suspend completion */, &debug_bits)) {
    return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
  }

  // Check to see if a ForceEarlyReturn was already in progress
  if (state->is_earlyret_pending()) {
    // Probably possible for JVMTI clients to trigger this, but the
    // JPDA backend shouldn't allow this to happen
    return JVMTI_ERROR_INTERNAL;
  }
  {
    // The same as for PopFrame. Workaround bug:
    //  4812902: popFrame hangs if the method is waiting at a synchronize
    // Catch this condition and return an error to avoid hanging.
    // Now JVMTI spec allows an implementation to bail out with an opaque
    // frame error.
    OSThread* osThread = java_thread->osthread();
    if (osThread->get_state() == MONITOR_WAIT) {
      return JVMTI_ERROR_OPAQUE_FRAME;
    }
  }
  Handle ret_ob_h;
  jvmtiError err = check_top_frame(current_thread, java_thread, value, tos, &ret_ob_h);
  if (err != JVMTI_ERROR_NONE) {
    return err;
  }
  assert(tos != atos || value.l == NULL || ret_ob_h() != NULL,
         "return object oop must not be NULL if jobject is not NULL");

  // Update the thread state to reflect that the top frame must be
  // forced to return.
  // The current frame will be returned later when the suspended
  // thread is resumed and right before returning from VM to Java.
  // (see call_VM_base() in assembler_<cpu>.cpp).

  state->set_earlyret_pending();
  state->set_earlyret_oop(ret_ob_h());
  state->set_earlyret_value(value, tos);

  // Set pending step flag for this early return.
  // It is cleared when next step event is posted.
  state->set_pending_step_for_earlyret();

  return JVMTI_ERROR_NONE;
} /* end force_early_return */

// Collects monitors owned by _java_thread that were not already found
// during the stack walk (e.g. monitors entered via JNI MonitorEnter).
void
JvmtiMonitorClosure::do_monitor(ObjectMonitor* mon) {
  if ( _error != JVMTI_ERROR_NONE) {
    // Error occurred in previous iteration so no need to add
    // to the list.
    return;
  }
  if (mon->owner() == _java_thread ) {
    // Filter out on stack monitors collected during stack walk.
    oop obj = (oop)mon->object();
    bool found = false;
    for (int j = 0; j < _owned_monitors_list->length(); j++) {
      jobject jobj = ((jvmtiMonitorStackDepthInfo*)_owned_monitors_list->at(j))->monitor;
      oop check = JNIHandles::resolve(jobj);
      if (check == obj) {
        // On stack monitor already collected during the stack walk.
        found = true;
        break;
      }
    }
    if (found == false) {
      // This is off stack monitor (e.g. acquired via jni MonitorEnter).
      jvmtiError err;
      jvmtiMonitorStackDepthInfo *jmsdi;
      err = _env->allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsigned char **)&jmsdi);
      if (err != JVMTI_ERROR_NONE) {
        _error = err;
        return;
      }
      Handle hobj(Thread::current(), obj);
      jmsdi->monitor = _env->jni_reference(_calling_thread, hobj);
      // stack depth is unknown for this monitor.
      jmsdi->stack_depth = -1;
      _owned_monitors_list->append(jmsdi);
    }
  }
}

// Scratch table shared by do_module/get_all_modules while iterating
// modules under Module_lock.
GrowableArray<OopHandle>* JvmtiModuleClosure::_tbl = NULL;

// Record one module's OopHandle; called for each module by modules_do().
void JvmtiModuleClosure::do_module(ModuleEntry* entry) {
  assert_locked_or_safepoint(Module_lock);
  OopHandle module = entry->module_handle();
  guarantee(module.resolve() != NULL, "module object is NULL");
  _tbl->push(module);
}

// Implements JVMTI GetAllModules: returns a JVMTI-allocated array of JNI
// local refs for every module currently loaded in the VM.
jvmtiError
JvmtiModuleClosure::get_all_modules(JvmtiEnv* env, jint* module_count_ptr, jobject** modules_ptr) {
  ResourceMark rm;
  MutexLocker mcld(ClassLoaderDataGraph_lock);
  MutexLocker ml(Module_lock);

  _tbl = new GrowableArray<OopHandle>(77);
  if (_tbl == NULL) {
    return JVMTI_ERROR_OUT_OF_MEMORY;
  }

  // Iterate over all the modules loaded to the system.
  ClassLoaderDataGraph::modules_do(&do_module);

  jint len = _tbl->length();
  guarantee(len > 0, "at least one module must be present");

  jobject* array = (jobject*)env->jvmtiMalloc((jlong)(len * sizeof(jobject)));
  if (array == NULL) {
    return JVMTI_ERROR_OUT_OF_MEMORY;
  }
  for (jint idx = 0; idx < len; idx++) {
    array[idx] = JNIHandles::make_local(Thread::current(), _tbl->at(idx).resolve());
  }
  _tbl = NULL;
  *modules_ptr = array;
  *module_count_ptr = len;
  return JVMTI_ERROR_NONE;
}

// VM operation: update JVMTI state after the top frame has been popped;
// fails with THREAD_NOT_ALIVE if the target thread is gone or exiting.
void
VM_UpdateForPopTopFrame::doit() {
  JavaThread* jt = _state->get_thread();
  ThreadsListHandle tlh;
  if (jt != NULL && tlh.includes(jt) && !jt->is_exiting() && jt->threadObj() != NULL) {
    _state->update_for_pop_top_frame();
  } else {
    _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
  }
}

// VM operation: register a frame-pop notification at the given depth
// (converted to an absolute frame number counted from the bottom).
void
VM_SetFramePop::doit() {
  JavaThread* jt = _state->get_thread();
  ThreadsListHandle tlh;
  if (jt != NULL && tlh.includes(jt) && !jt->is_exiting() && jt->threadObj() != NULL) {
    int frame_number = _state->count_frames() - _depth;
    _state->env_thread_state((JvmtiEnvBase*)_env)->set_frame_pop(frame_number);
  } else {
    _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
  }
}

// Closure: collect the monitors owned by the (quiesced) target thread.
void
GetOwnedMonitorInfoClosure::do_thread(Thread *target) {
  assert(target->is_Java_thread(), "just checking");
  JavaThread *jt = (JavaThread *)target;
  if (!jt->is_exiting() && (jt->threadObj() != NULL)) {
    _result = ((JvmtiEnvBase *)_env)->get_owned_monitors(_calling_thread,
                                                         jt,
                                                         _owned_monitors_list);
  }
}

// Closure: report the monitor the target thread is contending on or
// waiting for, if any.
void
GetCurrentContendedMonitorClosure::do_thread(Thread *target) {
  assert(target->is_Java_thread(), "just checking");
  JavaThread *jt = (JavaThread *)target;
  if (!jt->is_exiting() && (jt->threadObj() != NULL)) {
    _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,
                                                                    jt,
                                                                    _owned_monitor_ptr);
  }
}

// Closure: copy a slice of the target thread's stack trace into the
// caller-supplied frame buffer.
void
GetStackTraceClosure::do_thread(Thread *target) {
  assert(target->is_Java_thread(), "just checking");
  JavaThread *jt = (JavaThread *)target;
  if (!jt->is_exiting() && jt->threadObj() != NULL) {
    _result = ((JvmtiEnvBase *)_env)->get_stack_trace(jt,
                                                      _start_depth, _max_count,
                                                      _frame_buffer, _count_ptr);
  }
}

// Closure: count the frames on the target thread's stack.
void
GetFrameCountClosure::do_thread(Thread *target) {
  JavaThread* jt = _state->get_thread();
  assert(target == jt, "just checking");
  if (!jt->is_exiting() && jt->threadObj() != NULL) {
    _result = ((JvmtiEnvBase*)_env)->get_frame_count(_state, _count_ptr);
  }
}

// Closure: report the method and bytecode location of the frame at _depth
// on the target thread's stack.
void
GetFrameLocationClosure::do_thread(Thread *target) {
  assert(target->is_Java_thread(), "just checking");
  JavaThread *jt = (JavaThread *)target;
  if (!jt->is_exiting() && jt->threadObj() != NULL) {
    _result = ((JvmtiEnvBase*)_env)->get_frame_location(jt, _depth,
                                                        _method_ptr, _location_ptr);
  }
}