/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memPtr.hpp"
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
#include "utilities/decoder.hpp"
#include "utilities/globalDefinitions.hpp"

bool NMT_track_callsite = false;

// walk all 'known' threads at NMT sync point, and collect their recorders
void SyncThreadRecorderClosure::do_thread(Thread* thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
  if (thread->is_Java_thread()) {
    JavaThread* javaThread = (JavaThread*)thread;
    MemRecorder* recorder = javaThread->get_recorder();
    if (recorder != NULL) {
      MemTracker::enqueue_pending_recorder(recorder);
      javaThread->set_recorder(NULL);
    }
  }
  _thread_count ++;
}


MemRecorder*                    MemTracker::_global_recorder = NULL;
MemSnapshot*                    MemTracker::_snapshot = NULL;
MemBaseline                     MemTracker::_baseline;
Mutex                           MemTracker::_query_lock(Monitor::native, "NMT_queryLock");
volatile MemRecorder*           MemTracker::_merge_pending_queue = NULL;
volatile MemRecorder*           MemTracker::_pooled_recorders = NULL;
MemTrackWorker*                 MemTracker::_worker_thread = NULL;
int                             MemTracker::_sync_point_skip_count = 0;
MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
int                             MemTracker::_thread_count = 255;
volatile jint                   MemTracker::_pooled_recorder_count = 0;
debug_only(intx                 MemTracker::_main_thread_tid = 0;)
debug_only(volatile jint        MemTracker::_pending_recorder_count = 0;)

void MemTracker::init_tracking_options(const char* option_line) {
  _tracking_level = NMT_off;
  if (strncmp(option_line, "=summary", 8) == 0) {
    _tracking_level = NMT_summary;
  } else if (strncmp(option_line, "=detail", 8) == 0) {
    _tracking_level = NMT_detail;
  }
}
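
// NMT state machine, as driven by the functions below:
//   NMT_uninited -> NMT_bootstrapping_single_thread -> NMT_bootstrapping_multi_thread
//     -> NMT_started -> NMT_shutdown_pending (via shutdown())
//     -> NMT_final_shutdown (worker thread) -> NMT_shutdown (at sync point)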

// first phase of bootstrapping, when VM is still in single-threaded mode.
void MemTracker::bootstrap_single_thread() {
  if (_tracking_level > NMT_off) {
    assert(_state == NMT_uninited, "wrong state");

    // NMT is not supported when UseMallocOnly is on. NMT can NOT
    // handle the amount of malloc data without significantly impacting
    // runtime performance when this flag is on.
    if (UseMallocOnly) {
      shutdown(NMT_use_malloc_only);
      return;
    }

    debug_only(_main_thread_tid = os::current_thread_id();)
    _state = NMT_bootstrapping_single_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}

// second phase of bootstrapping, when VM is about to enter, or has already
// entered, multi-threaded mode.
void MemTracker::bootstrap_multi_thread() {
  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
    // create nmt lock for multi-thread execution
    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
    _state = NMT_bootstrapping_multi_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}

// fully start nmt
void MemTracker::start() {
  // Native memory tracking is off from command line option
  if (_tracking_level == NMT_off || shutdown_in_progress()) return;

  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");

  _snapshot = new (std::nothrow)MemSnapshot();
  if (_snapshot != NULL && !_snapshot->out_of_memory()) {
    if (start_worker()) {
      _state = NMT_started;
      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
      return;
    }
  }

  // failed to start native memory tracking; shut it down
  shutdown(NMT_initialization);
}

/**
 * Shut down native memory tracking.
 * We can not shut down native memory tracking immediately, so we just
 * set the shutdown-pending flag; every native memory tracking component
 * then shuts itself down in an orderly fashion.
 *
 * The shutdown sequence:
 * 1. MemTracker::shutdown() sets MemTracker to the shutdown-pending state.
 * 2. The worker thread calls MemTracker::final_shutdown(), which transitions
 *    MemTracker to the final shutdown state.
 * 3. At the next sync point, MemTracker does the final cleanup, before setting
 *    the memory tracking level to off to complete the shutdown.
 */
void MemTracker::shutdown(ShutdownReason reason) {
  if (_tracking_level == NMT_off) return;

  if (_state <= NMT_bootstrapping_single_thread) {
    // we are still in single-threaded mode, so there is no contention
    _state = NMT_shutdown_pending;
    _reason = reason;
  } else {
    // we want to know who initiated the shutdown
    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
      (jint*)&_state, (jint)NMT_started)) {
      _reason = reason;
    }
  }
}

// final phase of shutdown
void MemTracker::final_shutdown() {
  // delete all pending recorders and pooled recorders
  delete_all_pending_recorders();
  delete_all_pooled_recorders();

  {
    // shared baseline and snapshot are the only objects needed to
    // create query results
    MutexLockerEx locker(&_query_lock, true);
    // cleanup baseline data and snapshot
    _baseline.clear();
    delete _snapshot;
    _snapshot = NULL;
  }

  // shutdown shared decoder instance, since it is only
  // used by native memory tracking so far.
  Decoder::shutdown();

  MemTrackWorker* worker = NULL;
  {
    ThreadCritical tc;
    // can not delete worker inside the thread critical
    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
      worker = _worker_thread;
      _worker_thread = NULL;
    }
  }
  if (worker != NULL) {
    delete worker;
  }
  _state = NMT_final_shutdown;
}
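
// _pooled_recorders and _merge_pending_queue are maintained as lock-free
// (CAS-based) singly-linked stacks. Writers push with cmpxchg_ptr on the
// list head; the delete_all_* helpers below detach a whole chain at once
// by swapping the head with NULL.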

// delete all pooled recorders
void MemTracker::delete_all_pooled_recorders() {
  // free all pooled recorders
  volatile MemRecorder* cur_head = _pooled_recorders;
  if (cur_head != NULL) {
    MemRecorder* null_ptr = NULL;
    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
      (void*)&_pooled_recorders, (void*)cur_head)) {
      cur_head = _pooled_recorders;
    }
    if (cur_head != NULL) {
      delete cur_head;
      _pooled_recorder_count = 0;
    }
  }
}

// delete all recorders in pending queue
void MemTracker::delete_all_pending_recorders() {
  // free all pending recorders
  MemRecorder* pending_head = get_pending_recorders();
  if (pending_head != NULL) {
    delete pending_head;
  }
}

/*
 * retrieve the per-thread recorder of the specified thread.
 * if thread == NULL, the global recorder is used.
 */
MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
  if (shutdown_in_progress()) return NULL;

  MemRecorder* rc;
  if (thread == NULL) {
    rc = _global_recorder;
  } else {
    rc = thread->get_recorder();
  }

  // the current recorder is full; enqueue it for merging and get a fresh one
  if (rc != NULL && rc->is_full()) {
    enqueue_pending_recorder(rc);
    rc = NULL;
  }

  if (rc == NULL) {
    rc = get_new_or_pooled_instance();
    if (thread == NULL) {
      _global_recorder = rc;
    } else {
      thread->set_recorder(rc);
    }
  }
  return rc;
}

/*
 * get a per-thread recorder from the pool, or create a new one if
 * none is available.
 */
MemRecorder* MemTracker::get_new_or_pooled_instance() {
  MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
  if (cur_head == NULL) {
    MemRecorder* rec = new (std::nothrow)MemRecorder();
    if (rec == NULL || rec->out_of_memory()) {
      shutdown(NMT_out_of_memory);
      if (rec != NULL) {
        delete rec;
        rec = NULL;
      }
    }
    return rec;
  } else {
    MemRecorder* next_head = cur_head->next();
    if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
      (void*)cur_head)) {
      return get_new_or_pooled_instance();
    }
    cur_head->set_next(NULL);
    Atomic::dec(&_pooled_recorder_count);
    debug_only(cur_head->set_generation();)
    return cur_head;
  }
}

/*
 * retrieve all recorders in the pending queue, and empty the queue
 */
MemRecorder* MemTracker::get_pending_recorders() {
  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  MemRecorder* null_ptr = NULL;
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  }
  debug_only(Atomic::store(0, &_pending_recorder_count));
  return cur_head;
}
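
// Note: recorders are chained through MemRecorder::next(). Deleting a
// detached head is assumed to free the entire chain (see
// delete_all_pending_recorders() above, which deletes only the head it
// obtains from get_pending_recorders()).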

/*
 * release a recorder back to the recorder pool.
 */
void MemTracker::release_thread_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");
  // we don't want to pool too many recorders
  rec->set_next(NULL);
  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
    delete rec;
    return;
  }

  rec->clear();
  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
    rec->set_next(cur_head);
  }
  Atomic::inc(&_pooled_recorder_count);
}
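
// create_memory_record() below is the funnel for all allocation and
// deallocation events; it is presumably reached through the inline
// record_malloc()/record_free() style wrappers declared in memTracker.hpp.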

/*
 * This is the most important method in the whole NMT implementation.
 *
 * Create a memory record.
 * 1. When NMT is in single-threaded bootstrapping mode, no lock is needed as
 *    the VM is still in single-threaded mode.
 * 2. For all threads other than JavaThreads, ThreadCritical is needed
 *    to write records to the global recorder.
 * 3. JavaThreads that are no longer visible to safepoints also need to
 *    take ThreadCritical, and their records are written to the global
 *    recorder, since these threads are NOT walked by Threads::threads_do().
 * 4. JavaThreads that are running in native state have to transition
 *    to VM state before writing to per-thread recorders.
 * 5. JavaThreads that are running in VM state do not need any lock and
 *    records are written to per-thread recorders.
 * 6. A thread that has yet to attach its VM 'Thread' needs to take
 *    ThreadCritical to write to the global recorder.
 *
 * Important note:
 *   NO LOCK should be taken inside ThreadCritical lock !!!
 */
void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
    size_t size, address pc, Thread* thread) {
  if (!shutdown_in_progress()) {
    // single thread, we just write records directly to the global recorder,
    // without any lock
    if (_state == NMT_bootstrapping_single_thread) {
      assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
      thread = NULL;
    } else {
      if (thread == NULL) {
        // don't use Thread::current(), since it is possible that
        // the calling thread has yet to attach its VM 'Thread',
        // which would result in an assertion failure
        thread = ThreadLocalStorage::thread();
      }
    }

    if (thread != NULL) {
#ifdef ASSERT
      // trigger the assertion on stack base. This ensures that threads call
      // Thread::record_stack_base_and_size(), which creates the
      // thread native stack records.
      thread->stack_base();
#endif
      // for a JavaThread, if it is running in native state, we need to transition it to
      // VM state, so it can stop at a safepoint. A JavaThread running in VM state does not
      // need a lock to write records.
      if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
        if (((JavaThread*)thread)->thread_state() == _thread_in_native) {
          ThreadInVMfromNative trans((JavaThread*)thread);
          create_record_in_recorder(addr, flags, size, pc, thread);
        } else {
          create_record_in_recorder(addr, flags, size, pc, thread);
        }
      } else {
        // other threads, such as worker and watcher threads, need to
        // take ThreadCritical to write to the global recorder
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    } else {
      if (_state == NMT_bootstrapping_single_thread) {
        // single thread, no lock needed
        create_record_in_recorder(addr, flags, size, pc, NULL);
      } else {
        // for a thread that has yet to attach its VM 'Thread', we can not use a VM mutex.
        // use native thread critical instead
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    }
  }
}

// write a record to the proper recorder. No lock can be taken from this method
// down.
void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags,
    size_t size, address pc, Thread* thread) {
  assert(thread == NULL || thread->is_Java_thread(), "wrong thread");

  MemRecorder* rc = get_thread_recorder((JavaThread*)thread);
  if (rc != NULL) {
    rc->record(addr, flags, size, pc);
  }
}

/**
 * enqueue a recorder to the pending queue
 */
void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");

  // we are shutting down, so just delete it
  if (shutdown_in_progress()) {
    rec->set_next(NULL);
    delete rec;
    return;
  }

  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
    rec->set_next(cur_head);
  }
  debug_only(Atomic::inc(&_pending_recorder_count);)
}

/*
 * This method is called at a global safepoint,
 * during its synchronization process.
 * 1. enqueue all JavaThreads' per-thread recorders
 * 2. enqueue the global recorder
 * 3. retrieve all pending recorders
 * 4. reset the global sequence number generator
 * 5. call the worker's sync
 */
#define MAX_SAFEPOINTS_TO_SKIP     128
#define SAFE_SEQUENCE_THRESHOLD    30
#define HIGH_GENERATION_THRESHOLD  60

void MemTracker::sync() {
  assert(_tracking_level > NMT_off, "NMT is not enabled");
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");

  // Some GC tests hit a large number of safepoints in a short period of time
  // without meaningful activity. We should avoid going to the sync point in
  // these cases, which can potentially exhaust the generation buffer.
  // Here are the factors that determine whether we should go into the sync point:
  // 1. not overflowing the sequence number
  // 2. whether we are in danger of overflowing the generation buffer
  // 3. how many safepoints have already skipped the sync point
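  // With the values above: a safepoint may skip the sync point only while
  // sequence numbers are below 30% of max_jint and 60% or more of the
  // worker's generation buffers are in use (i.e. the worker is falling
  // behind), and at most 128 consecutive safepoints are skipped.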
  if (_state == NMT_started) {
    // the worker thread is not ready; no one can manage the generation
    // buffer, so skip this safepoint
    if (_worker_thread == NULL) return;

    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
        _sync_point_skip_count ++;
        return;
      }
    }
    _sync_point_skip_count = 0;
    // walk all JavaThreads to collect recorders
    SyncThreadRecorderClosure stc;
    Threads::threads_do(&stc);

    _thread_count = stc.get_thread_count();
    MemRecorder* pending_recorders = get_pending_recorders();

    {
      // This code runs at a safepoint while holding the ThreadCritical lock,
      // which guarantees that NMT is fully synchronized.
      ThreadCritical tc;
      if (_global_recorder != NULL) {
        _global_recorder->set_next(pending_recorders);
        pending_recorders = _global_recorder;
        _global_recorder = NULL;
      }
      SequenceGenerator::reset();
      // check _worker_thread under the lock to avoid a race condition
      if (_worker_thread != NULL) {
        _worker_thread->at_sync_point(pending_recorders);
      }
    }
  }

  // now it is time to shut the whole thing off
  if (_state == NMT_final_shutdown) {
    _tracking_level = NMT_off;

    // walk all JavaThreads to delete all recorders
    SyncThreadRecorderClosure stc;
    Threads::threads_do(&stc);
    // delete the global recorder
    {
      ThreadCritical tc;
      if (_global_recorder != NULL) {
        delete _global_recorder;
        _global_recorder = NULL;
      }
    }

    _state = NMT_shutdown;
  }
}

/*
 * Start worker thread.
 */
bool MemTracker::start_worker() {
  assert(_worker_thread == NULL, "Just Check");
  _worker_thread = new (std::nothrow) MemTrackWorker();
  if (_worker_thread == NULL || _worker_thread->has_error()) {
    shutdown(NMT_initialization);
    return false;
  }
  _worker_thread->start();
  return true;
}
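
// thread_exiting() below is presumably invoked from JavaThread exit code
// while the thread is still attached, so the recorder handed off here is
// merged at the next sync point rather than leaked.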

/*
 * We need to collect a JavaThread's per-thread recorder
 * before it exits.
 */
void MemTracker::thread_exiting(JavaThread* thread) {
  if (is_on()) {
    MemRecorder* rec = thread->get_recorder();
    if (rec != NULL) {
      enqueue_pending_recorder(rec);
      thread->set_recorder(NULL);
    }
  }
}

// baseline current memory snapshot
bool MemTracker::baseline() {
  MutexLockerEx lock(&_query_lock, true);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL) {
    return _baseline.baseline(*snapshot, false);
  }
  return false;
}

// print memory usage from current snapshot
bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MemBaseline baseline;
  MutexLockerEx lock(&_query_lock, true);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
    BaselineReporter reporter(out, unit);
    reporter.report_baseline(baseline, summary_only);
    return true;
  }
  return false;
}

// compare memory usage between current snapshot and baseline
bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MutexLockerEx lock(&_query_lock, true);
  if (_baseline.baselined()) {
    MemBaseline baseline;
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
      BaselineReporter reporter(out, unit);
      reporter.diff_baselines(baseline, _baseline, summary_only);
      return true;
    }
  }
  return false;
}

#ifndef PRODUCT
void MemTracker::walk_stack(int toSkip, char* buf, int len) {
  int cur_len = 0;
  char tmp[1024];
  address pc;

  while (cur_len < len) {
    pc = os::get_caller_pc(toSkip + 1);
    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
      cur_len = (int)strlen(buf);
    } else {
      buf[cur_len] = '\0';
      break;
    }
    toSkip ++;
  }
}

void MemTracker::print_tracker_stats(outputStream* st) {
  st->print_cr("\nMemory Tracker Stats:");
  st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
  st->print_cr("\tthread count = %d", _thread_count);
  st->print_cr("\tArena instance = %d", Arena::_instance_count);
  st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
  st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
  st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
  if (_worker_thread != NULL) {
    st->print_cr("\tWorker thread:");
    st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
    st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
    st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
  } else {
    st->print_cr("\tWorker thread is not started");
  }
  st->print_cr(" ");

  if (_snapshot != NULL) {
    _snapshot->print_snapshot_stats(st);
  } else {
    st->print_cr("No snapshot");
  }
}
#endif