/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "oops/instanceKlass.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memPtr.hpp"
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"

bool NMT_track_callsite = false;

// walk all 'known' threads at NMT sync point, and collect their recorders
void SyncThreadRecorderClosure::do_thread(Thread* thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
  if (thread->is_Java_thread()) {
    JavaThread* javaThread = (JavaThread*)thread;
    MemRecorder* recorder = javaThread->get_recorder();
    if (recorder != NULL) {
      MemTracker::enqueue_pending_recorder(recorder);
      javaThread->set_recorder(NULL);
    }
  }
  _thread_count ++;
}

MemRecorder* volatile MemTracker::_global_recorder = NULL;
MemSnapshot* MemTracker::_snapshot = NULL;
MemBaseline MemTracker::_baseline;
Mutex* MemTracker::_query_lock = NULL;
MemRecorder* volatile MemTracker::_merge_pending_queue = NULL;
MemRecorder* volatile MemTracker::_pooled_recorders = NULL;
MemTrackWorker* MemTracker::_worker_thread = NULL;
int MemTracker::_sync_point_skip_count = 0;
MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off;
volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited;
MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none;
int MemTracker::_thread_count = 255;
volatile jint MemTracker::_pooled_recorder_count = 0;
volatile unsigned long MemTracker::_processing_generation = 0;
volatile bool MemTracker::_worker_thread_idle = false;
volatile jint MemTracker::_pending_op_count = 0;
volatile bool MemTracker::_slowdown_calling_thread = false;
debug_only(intx MemTracker::_main_thread_tid = 0;)
NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;)

void MemTracker::init_tracking_options(const char* option_line) {
  _tracking_level = NMT_off;
  if (strcmp(option_line, "=summary") == 0) {
    _tracking_level = NMT_summary;
  } else if (strcmp(option_line, "=detail") == 0) {
    // detail relies on a stack-walking ability that may not
    // be available depending on platform and/or compiler flags
#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
    _tracking_level = NMT_detail;
#else
    jio_fprintf(defaultStream::error_stream(),
      "NMT detail is not supported on this platform. Using NMT summary instead.\n");
    _tracking_level = NMT_summary;
#endif
  } else if (strcmp(option_line, "=off") != 0) {
    vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
  }
}
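/*
 * Illustration (a sketch, not part of the original logic): the option line
 * parsed above is the tail of -XX:NativeMemoryTracking=<value>, e.g.
 *
 *   -XX:NativeMemoryTracking=summary   => NMT_summary
 *   -XX:NativeMemoryTracking=detail    => NMT_detail (falls back to summary
 *                                         where native stack walking is
 *                                         not supported)
 *   -XX:NativeMemoryTracking=off       => NMT_off
 */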
// first phase of bootstrapping, when the VM is still in single-threaded mode.
void MemTracker::bootstrap_single_thread() {
  if (_tracking_level > NMT_off) {
    assert(_state == NMT_uninited, "wrong state");

    // NMT is not supported when UseMallocOnly is on. NMT can NOT
    // handle the amount of malloc data without significantly impacting
    // runtime performance when this flag is on.
    if (UseMallocOnly) {
      shutdown(NMT_use_malloc_only);
      return;
    }

    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
    if (_query_lock == NULL) {
      shutdown(NMT_out_of_memory);
      return;
    }

    debug_only(_main_thread_tid = os::current_thread_id();)
    _state = NMT_bootstrapping_single_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}

// second phase of bootstrapping, when the VM is about to, or has already,
// entered multi-threaded mode.
void MemTracker::bootstrap_multi_thread() {
  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
    // create nmt lock for multi-threaded execution
    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
    _state = NMT_bootstrapping_multi_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}

// fully start nmt
void MemTracker::start() {
  // Native memory tracking is turned off by command line option
  if (_tracking_level == NMT_off || shutdown_in_progress()) return;

  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");

  _snapshot = new (std::nothrow)MemSnapshot();
  if (_snapshot != NULL) {
    if (!_snapshot->out_of_memory() && start_worker(_snapshot)) {
      _state = NMT_started;
      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
      return;
    }

    delete _snapshot;
    _snapshot = NULL;
  }

  // failed to start native memory tracking, shut it down
  shutdown(NMT_initialization);
}
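/*
 * For orientation, the expected startup sequence (a sketch; the actual call
 * sites live elsewhere in the VM, e.g. in argument parsing and thread
 * initialization code):
 *
 *   MemTracker::init_tracking_options(tail);  // parse -XX:NativeMemoryTracking
 *   MemTracker::bootstrap_single_thread();    // while the VM is single-threaded
 *   MemTracker::bootstrap_multi_thread();     // before going multi-threaded
 *   MemTracker::start();                      // create snapshot + worker thread
 */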
/**
 * Shut down native memory tracking.
 * We can not shut down native memory tracking immediately, so we just
 * set the shutdown-pending flag; every native memory tracking component
 * should then shut itself down in an orderly fashion.
 *
 * The shutdown sequence:
 * 1. MemTracker::shutdown() sets MemTracker to the shutdown-pending state.
 * 2. The worker thread calls MemTracker::final_shutdown(), which transitions
 *    MemTracker to the final shutdown state.
 * 3. At a sync point, MemTracker does the final cleanup, before setting the
 *    memory tracking level to off to complete the shutdown.
 */
void MemTracker::shutdown(ShutdownReason reason) {
  if (_tracking_level == NMT_off) return;

  if (_state <= NMT_bootstrapping_single_thread) {
    // we are still in single-threaded mode, so there is no contention
    _state = NMT_shutdown_pending;
    _reason = reason;
  } else {
    // we want to know who initiated the shutdown
    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
                                             (jint*)&_state, (jint)NMT_started)) {
      _reason = reason;
    }
  }
}

// final phase of shutdown
void MemTracker::final_shutdown() {
  // delete all pending recorders and pooled recorders
  delete_all_pending_recorders();
  delete_all_pooled_recorders();

  {
    // shared baseline and snapshot are the only objects needed to
    // create query results
    MutexLockerEx locker(_query_lock, true);
    // cleanup baseline data and snapshot
    _baseline.clear();
    delete _snapshot;
    _snapshot = NULL;
  }

  // shutdown shared decoder instance, since it is only
  // used by native memory tracking so far.
  Decoder::shutdown();

  MemTrackWorker* worker = NULL;
  {
    ThreadCritical tc;
    // can not delete worker inside the thread critical
    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
      worker = _worker_thread;
      _worker_thread = NULL;
    }
  }
  if (worker != NULL) {
    delete worker;
  }
  _state = NMT_final_shutdown;
}

// delete all pooled recorders
void MemTracker::delete_all_pooled_recorders() {
  // free all pooled recorders
  MemRecorder* volatile cur_head = _pooled_recorders;
  if (cur_head != NULL) {
    MemRecorder* null_ptr = NULL;
    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
      (void*)&_pooled_recorders, (void*)cur_head)) {
      cur_head = _pooled_recorders;
    }
    if (cur_head != NULL) {
      delete cur_head;
      _pooled_recorder_count = 0;
    }
  }
}

// delete all recorders in the pending queue
void MemTracker::delete_all_pending_recorders() {
  // free all pending recorders
  MemRecorder* pending_head = get_pending_recorders();
  if (pending_head != NULL) {
    delete pending_head;
  }
}

/*
 * retrieve the per-thread recorder of the specified thread.
 * if thread == NULL, it means the global recorder
 */
MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
  if (shutdown_in_progress()) return NULL;

  MemRecorder* rc;
  if (thread == NULL) {
    rc = _global_recorder;
  } else {
    rc = thread->get_recorder();
  }

  // a full recorder is handed to the worker thread via the pending queue
  if (rc != NULL && rc->is_full()) {
    enqueue_pending_recorder(rc);
    rc = NULL;
  }

  if (rc == NULL) {
    rc = get_new_or_pooled_instance();
    if (thread == NULL) {
      _global_recorder = rc;
    } else {
      thread->set_recorder(rc);
    }
  }
  return rc;
}
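/*
 * Recorder life cycle, as implemented by the functions in this file (a
 * summary sketch, not additional behavior):
 *
 *   get_thread_recorder()             // hand a thread its current recorder
 *     -> get_new_or_pooled_instance() //   pop one from the pool, or allocate
 *   recorder fills up
 *     -> enqueue_pending_recorder()   // push onto the merge-pending queue
 *   NMT sync point
 *     -> get_pending_recorders()      // worker thread takes the whole queue
 *   worker done merging
 *     -> release_thread_recorder()    // recorder returns to the pool, or is
 *                                     //   deleted if the pool is full enough
 */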
/*
 * get a per-thread recorder from the pool, or create a new one if
 * there is none available.
 */
MemRecorder* MemTracker::get_new_or_pooled_instance() {
  MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
  if (cur_head == NULL) {
    MemRecorder* rec = new (std::nothrow)MemRecorder();
    if (rec == NULL || rec->out_of_memory()) {
      shutdown(NMT_out_of_memory);
      if (rec != NULL) {
        delete rec;
        rec = NULL;
      }
    }
    return rec;
  } else {
    MemRecorder* next_head = cur_head->next();
    if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
      (void*)cur_head)) {
      return get_new_or_pooled_instance();
    }
    cur_head->set_next(NULL);
    Atomic::dec(&_pooled_recorder_count);
    cur_head->set_generation();
    return cur_head;
  }
}

/*
 * retrieve all recorders in the pending queue, and empty the queue
 */
MemRecorder* MemTracker::get_pending_recorders() {
  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  MemRecorder* null_ptr = NULL;
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  }
  NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
  return cur_head;
}

/*
 * release a recorder to the recorder pool.
 */
void MemTracker::release_thread_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");
  // we don't want to pool too many recorders
  rec->set_next(NULL);
  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
    delete rec;
    return;
  }

  rec->clear();
  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
    rec->set_next(cur_head);
  }
  Atomic::inc(&_pooled_recorder_count);
}

// write a record to the proper recorder. No lock can be taken from this
// method downward.
void MemTracker::write_tracking_record(address addr, MEMFLAGS flags,
    size_t size, jint seq, address pc, JavaThread* thread) {

  MemRecorder* rc = get_thread_recorder(thread);
  if (rc != NULL) {
    rc->record(addr, flags, size, seq, pc);
  }
}

/**
 * enqueue a recorder to the pending queue
 */
void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");

  // we are shutting down, so just delete it
  if (shutdown_in_progress()) {
    rec->set_next(NULL);
    delete rec;
    return;
  }

  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
    rec->set_next(cur_head);
  }
  NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);)
}
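/*
 * Note on the lock-free lists above: both the recorder pool and the
 * merge-pending queue are treated as simple CAS-based (Treiber-style)
 * stacks. The push pattern used by enqueue_pending_recorder() and
 * release_thread_recorder() is, schematically:
 *
 *   do {
 *     cur_head = list_head;       // re-read the current head
 *     rec->set_next(cur_head);    // link the new node in front of it
 *   } while (cmpxchg(rec, &list_head, cur_head) != cur_head);  // retry on contention
 */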
/*
 * This method is called at a global safepoint,
 * during the safepoint synchronization process.
 * 1. enqueue all JavaThreads' per-thread recorders
 * 2. enqueue the global recorder
 * 3. retrieve all pending recorders
 * 4. reset the global sequence number generator
 * 5. call the worker's sync
 */
#define MAX_SAFEPOINTS_TO_SKIP     128
#define SAFE_SEQUENCE_THRESHOLD    30
#define HIGH_GENERATION_THRESHOLD  60
#define MAX_RECORDER_THREAD_RATIO  30

void MemTracker::sync() {
  assert(_tracking_level > NMT_off, "NMT is not enabled");
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");

  // Some GC tests hit a large number of safepoints in a short period of time
  // without meaningful activity. We should prevent going to the sync point
  // in these cases, which could otherwise exhaust the generation buffer.
  // Here are the factors that determine if we should go into the sync point:
  // 1. not overflowing the sequence number
  // 2. whether we are in danger of overflowing the generation buffer
  // 3. how many safepoints have already skipped the sync point
  if (_state == NMT_started) {
    // worker thread is not ready, no one can manage the generation
    // buffer, so skip this safepoint
    if (_worker_thread == NULL) return;

    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
        _sync_point_skip_count ++;
        return;
      }
    }
    {
      // This method is running at a safepoint, with the ThreadCritical lock,
      // so it should guarantee that NMT is fully sync-ed.
      ThreadCritical tc;

      // We can NOT execute an NMT sync-point if there are pending tracking ops.
      if (_pending_op_count == 0) {
        SequenceGenerator::reset();
        _sync_point_skip_count = 0;

        // walk all JavaThreads to collect recorders
        SyncThreadRecorderClosure stc;
        Threads::threads_do(&stc);

        _thread_count = stc.get_thread_count();
        MemRecorder* pending_recorders = get_pending_recorders();

        if (_global_recorder != NULL) {
          _global_recorder->set_next(pending_recorders);
          pending_recorders = _global_recorder;
          _global_recorder = NULL;
        }

        // If NMT has too many outstanding recorder instances, it usually
        // means that the worker thread is lagging behind in processing them.
        if (!AutoShutdownNMT) {
          _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
        }

        // check _worker_thread with the lock held, to avoid a race condition
        if (_worker_thread != NULL) {
          _worker_thread->at_sync_point(pending_recorders, instanceKlass::number_of_instance_classes());
        }
        assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
      } else {
        _sync_point_skip_count ++;
      }
    }
  }

  // now it is time to shut the whole thing off
  if (_state == NMT_final_shutdown) {
    // walk all JavaThreads to delete all recorders
    SyncThreadRecorderClosure stc;
    Threads::threads_do(&stc);
    // delete global recorder
    {
      ThreadCritical tc;
      if (_global_recorder != NULL) {
        delete _global_recorder;
        _global_recorder = NULL;
      }
    }
    MemRecorder* pending_recorders = get_pending_recorders();
    if (pending_recorders != NULL) {
      delete pending_recorders;
    }
    // retry at a later sync point, to ensure the MemRecorder instance count
    // drops to zero before completely shutting down NMT
    if (MemRecorder::_instance_count == 0) {
      _state = NMT_shutdown;
      _tracking_level = NMT_off;
    }
  }
}
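/*
 * Worked example of the skip heuristic above: with SAFE_SEQUENCE_THRESHOLD 30
 * and HIGH_GENERATION_THRESHOLD 60, a safepoint where only 10% of the
 * sequence number space is in use but 75% of the generation buffers are
 * occupied is skipped (up to MAX_SAFEPOINTS_TO_SKIP consecutive times),
 * giving the worker thread a chance to drain generations. Once sequence
 * usage reaches 30% the sync point runs regardless, since SequenceGenerator
 * must be reset before the jint sequence counter can overflow.
 */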
/*
 * Start the worker thread.
 */
bool MemTracker::start_worker(MemSnapshot* snapshot) {
  assert(_worker_thread == NULL && _snapshot != NULL, "Just Check");
  _worker_thread = new (std::nothrow) MemTrackWorker(snapshot);
  if (_worker_thread == NULL) {
    return false;
  } else if (_worker_thread->has_error()) {
    delete _worker_thread;
    _worker_thread = NULL;
    return false;
  }
  _worker_thread->start();
  return true;
}

/*
 * We need to collect a JavaThread's per-thread recorder
 * before it exits.
 */
void MemTracker::thread_exiting(JavaThread* thread) {
  if (is_on()) {
    MemRecorder* rec = thread->get_recorder();
    if (rec != NULL) {
      enqueue_pending_recorder(rec);
      thread->set_recorder(NULL);
    }
  }
}

// baseline the current memory snapshot
bool MemTracker::baseline() {
  MutexLocker lock(_query_lock);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL) {
    return _baseline.baseline(*snapshot, false);
  }
  return false;
}

// print memory usage from the current snapshot
bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MemBaseline baseline;
  MutexLocker lock(_query_lock);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
    BaselineReporter reporter(out, unit);
    reporter.report_baseline(baseline, summary_only);
    return true;
  }
  return false;
}
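/*
 * External usage note: baseline(), print_memory_usage() and
 * compare_memory_usage() (below) are the query entry points behind the jcmd
 * diagnostic command, e.g. (illustrative invocations, not defined in this
 * file):
 *
 *   jcmd <pid> VM.native_memory summary
 *   jcmd <pid> VM.native_memory baseline
 *   jcmd <pid> VM.native_memory summary.diff
 */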
// Whitebox API for blocking until the current generation of NMT data has been merged
bool MemTracker::wbtest_wait_for_data_merge() {
  // NMT can't be shut down while we're holding _query_lock
  MutexLocker lock(_query_lock);
  assert(_worker_thread != NULL, "Invalid query");
  // the generation at query time; NMT will spin till this generation is processed
  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
  unsigned long current_processing_generation = _processing_generation;
  // whether the generation counter has overflowed
  bool generation_overflown = (generation_at_query_time < current_processing_generation);
  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
  // spin
  while (!shutdown_in_progress()) {
    if (!generation_overflown) {
      if (current_processing_generation > generation_at_query_time) {
        return true;
      }
    } else {
      assert(generations_to_wrap >= 0, "Sanity check");
      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
      assert(current_generations_to_wrap >= 0, "Sanity check");
      // overflowing an unsigned long should take a long time, so the to_wrap check should be sufficient
      if (current_generations_to_wrap > generations_to_wrap &&
          current_processing_generation > generation_at_query_time) {
        return true;
      }
    }

    // if the worker thread is idle but the generation is not advancing, it
    // means there is no safepoint to let NMT advance the generation, so force one.
    if (_worker_thread_idle) {
      VM_ForceSafepoint vfs;
      VMThread::execute(&vfs);
    }
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot == NULL) {
      return false;
    }
    snapshot->wait(1000);
    current_processing_generation = _processing_generation;
  }
  // We end up here if NMT is shutting down before our data has been merged
  return false;
}

// compare memory usage between the current snapshot and the baseline
bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MutexLocker lock(_query_lock);
  if (_baseline.baselined()) {
    MemBaseline baseline;
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
      BaselineReporter reporter(out, unit);
      reporter.diff_baselines(baseline, _baseline, summary_only);
      return true;
    }
  }
  return false;
}

#ifndef PRODUCT
void MemTracker::walk_stack(int toSkip, char* buf, int len) {
  int cur_len = 0;
  char tmp[1024];
  address pc;

  while (cur_len < len) {
    pc = os::get_caller_pc(toSkip + 1);
    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
      cur_len = (int)strlen(buf);
    } else {
      buf[cur_len] = '\0';
      break;
    }
    toSkip ++;
  }
}

void MemTracker::print_tracker_stats(outputStream* st) {
  st->print_cr("\nMemory Tracker Stats:");
  st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
  st->print_cr("\tthread count = %d", _thread_count);
  st->print_cr("\tArena instance = %d", Arena::_instance_count);
  st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
  st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
  st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
  if (_worker_thread != NULL) {
    st->print_cr("\tWorker thread:");
    st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
    st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
    st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
  } else {
    st->print_cr("\tWorker thread is not started");
  }
  st->print_cr(" ");

  if (_snapshot != NULL) {
    _snapshot->print_snapshot_stats(st);
  } else {
    st->print_cr("No snapshot");
  }
}
#endif
// Tracker Implementation

/*
 * Create a tracker.
 * This is a fairly complicated constructor, as it has to make two important decisions:
 *   1) Does it need to take the ThreadCritical lock to write the tracking record
 *   2) Does it need to pre-reserve a sequence number for the tracking record
 *
 * The rules to determine if ThreadCritical is needed:
 *   1. When NMT is in single-threaded bootstrapping mode, no lock is needed
 *      as the VM is still in single-threaded mode.
 *   2. For all threads other than JavaThreads, ThreadCritical is needed
 *      to write records to the global recorder.
 *   3. For JavaThreads that are no longer visible to safepoints, ThreadCritical
 *      is also needed, and records are written to the global recorder, since
 *      these threads are NOT walked by Threads::threads_do().
 *   4. JavaThreads that are running in safepoint-safe states do not stop
 *      for safepoints, so the ThreadCritical lock should be taken to write
 *      memory records.
 *   5. JavaThreads that are running in VM state do not need any lock, and
 *      records are written to per-thread recorders.
 *   6. Threads that have yet to attach to the VM 'Thread' need to take
 *      ThreadCritical to write to the global recorder.
 *
 * The memory operations that need to pre-reserve sequence numbers:
 *   The memory operations that "release" memory blocks and that can fail
 *   need to pre-reserve a sequence number. They are realloc, uncommit and
 *   release.
 *
 * The reason for pre-reserving a sequence number is to prevent a race condition:
 *    Thread 1                      Thread 2
 *    <release>
 *                                  <allocate>
 *                                  <write allocate record>
 *    <write release record>
 *   if Thread 2 happens to obtain the memory address Thread 1 just released,
 *   then NMT can mistakenly report the memory as free.
 *
 * Noticeably, free() does not need to pre-reserve a sequence number, because
 * the call does not fail, so we can always write the "release" record before
 * the memory is actually freed.
 *
 * For realloc, uncommit and release, the following coding pattern should be used:
 *
 *     MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
 *     ptr = ::realloc(...);
 *     if (ptr != NULL) {
 *       tkr.record(...)
 *     } else {
 *       tkr.discard();
 *     }
 *
 *     MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
 *     if (uncommit(...)) {
 *       tkr.record(...);
 *     } else {
 *       tkr.discard();
 *     }
 *
 *     MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
 *     if (release(...)) {
 *       tkr.record(...);
 *     } else {
 *       tkr.discard();
 *     }
 *
 * Since a pre-reserved sequence number is only good for the generation in
 * which it is acquired, when there is a pending Tracker holding a reserved
 * sequence number, the NMT sync-point has to be skipped to prevent the
 * generation from advancing. This is done by incrementing and decrementing
 * MemTracker::_pending_op_count; when MemTracker::_pending_op_count > 0, the
 * NMT sync-point is skipped. Not all pre-reservations of sequence numbers
 * increment the pending op count. For JavaThreads that honor safepoints, no
 * safepoint can occur during the memory operation, so the pre-reserved
 * sequence number won't cross the generation boundary.
 */
MemTracker::Tracker::Tracker(MemoryOperation op, Thread* thr) {
  _op = NoOp;
  _seq = 0;
  if (MemTracker::is_on()) {
    _java_thread = NULL;
    _op = op;

    // figure out if the ThreadCritical lock is needed to write this operation
    // to MemTracker
    if (MemTracker::is_single_threaded_bootstrap()) {
      thr = NULL;
    } else if (thr == NULL) {
      // don't use Thread::current(), since it is possible that
      // the calling thread has yet to attach to the VM 'Thread',
      // which would result in an assertion failure
      thr = ThreadLocalStorage::thread();
    }

    if (thr != NULL) {
      // Check NMT load
      MemTracker::check_NMT_load(thr);

      if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) {
        _java_thread = (JavaThread*)thr;
        JavaThreadState state = _java_thread->thread_state();
        // JavaThreads that are safepoint safe can run through a safepoint,
        // so ThreadCritical is needed to ensure no threads at a safepoint create
        // new records while the records are being gathered and the sequence number is changing
        _need_thread_critical_lock =
          SafepointSynchronize::safepoint_safe(_java_thread, state);
      } else {
        _need_thread_critical_lock = true;
      }
    } else {
      _need_thread_critical_lock
        = !MemTracker::is_single_threaded_bootstrap();
    }

    // see if we need to pre-reserve a sequence number for this operation
    if (_op == Realloc || _op == Uncommit || _op == Release) {
      if (_need_thread_critical_lock) {
        ThreadCritical tc;
        MemTracker::inc_pending_op_count();
        _seq = SequenceGenerator::next();
      } else {
        // for threads that honor safepoints, no safepoint can occur
        // during the lifespan of the tracker, so we don't need to increase
        // the pending op count.
        _seq = SequenceGenerator::next();
      }
    }
  }
}

void MemTracker::Tracker::discard() {
  if (MemTracker::is_on() && _seq != 0) {
    if (_need_thread_critical_lock) {
      ThreadCritical tc;
      MemTracker::dec_pending_op_count();
    }
    _seq = 0;
  }
}
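/*
 * A failed operation must call discard(), as in the coding patterns shown in
 * the constructor comment above: dropping a Tracker that incremented
 * _pending_op_count without discarding it would leave the count elevated,
 * and NMT sync points would be skipped indefinitely.
 */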
void MemTracker::Tracker::record(address old_addr, address new_addr, size_t size,
    MEMFLAGS flags, address pc) {
  assert(old_addr != NULL && new_addr != NULL, "Sanity check");
  assert(_op == Realloc || _op == NoOp, "Wrong call");
  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
    assert(_seq > 0, "Need pre-reserved sequence number");
    if (_need_thread_critical_lock) {
      ThreadCritical tc;
      // free the old address, using the pre-reserved sequence number
      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
        0, _seq, pc, _java_thread);
      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
        size, SequenceGenerator::next(), pc, _java_thread);
      // decrement MemTracker pending_op_count
      MemTracker::dec_pending_op_count();
    } else {
      // free the old address, using the pre-reserved sequence number
      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
        0, _seq, pc, _java_thread);
      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
        size, SequenceGenerator::next(), pc, _java_thread);
    }
    _seq = 0;
  }
}
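/*
 * Note on the ordering above: the free record uses the pre-reserved (older)
 * sequence number while the malloc record takes a fresh one, so even if
 * another thread re-allocates the old address between the realloc call and
 * the recording, sorting by sequence number still yields free-before-malloc
 * for that address (see the race described in the constructor comment).
 */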
void MemTracker::Tracker::record(address addr, size_t size, MEMFLAGS flags, address pc) {
  // OOM already?
  if (addr == NULL) return;

  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
    bool pre_reserved_seq = (_seq != 0);
    address pc = CALLER_CALLER_PC;
    MEMFLAGS orig_flags = flags;

    // OR in the tagging flags
    switch(_op) {
      case Malloc:
        flags |= MemPointerRecord::malloc_tag();
        break;
      case Free:
        flags = MemPointerRecord::free_tag();
        break;
      case Realloc:
        fatal("Use the other Tracker::record()");
        break;
      case Reserve:
      case ReserveAndCommit:
        flags |= MemPointerRecord::virtual_memory_reserve_tag();
        break;
      case Commit:
        flags = MemPointerRecord::virtual_memory_commit_tag();
        break;
      case Type:
        flags |= MemPointerRecord::virtual_memory_type_tag();
        break;
      case Uncommit:
        assert(pre_reserved_seq, "Need pre-reserved sequence number");
        flags = MemPointerRecord::virtual_memory_uncommit_tag();
        break;
      case Release:
        assert(pre_reserved_seq, "Need pre-reserved sequence number");
        flags = MemPointerRecord::virtual_memory_release_tag();
        break;
      case ArenaSize:
        // a bit of a hack here: add a small positive offset to the arena
        // address for its size record, so the size record is sorted
        // right after the arena record.
        flags = MemPointerRecord::arena_size_tag();
        addr += sizeof(void*);
        break;
      case StackRelease:
        flags = MemPointerRecord::virtual_memory_release_tag();
        break;
      default:
        ShouldNotReachHere();
    }

    // write the memory tracking record
    if (_need_thread_critical_lock) {
      ThreadCritical tc;
      if (_seq == 0) _seq = SequenceGenerator::next();
      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
      if (_op == ReserveAndCommit) {
        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
          size, SequenceGenerator::next(), pc, _java_thread);
      }
      if (pre_reserved_seq) MemTracker::dec_pending_op_count();
    } else {
      if (_seq == 0) _seq = SequenceGenerator::next();
      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
      if (_op == ReserveAndCommit) {
        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
          size, SequenceGenerator::next(), pc, _java_thread);
      }
    }
    _seq = 0;
  }
}