1 /* 2 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 #include "precompiled.hpp" 25 26 #include "oops/instanceKlass.hpp" 27 #include "runtime/atomic.hpp" 28 #include "runtime/interfaceSupport.hpp" 29 #include "runtime/mutexLocker.hpp" 30 #include "runtime/safepoint.hpp" 31 #include "runtime/threadCritical.hpp" 32 #include "runtime/vm_operations.hpp" 33 #include "services/memPtr.hpp" 34 #include "services/memReporter.hpp" 35 #include "services/memTracker.hpp" 36 #include "utilities/decoder.hpp" 37 #include "utilities/defaultStream.hpp" 38 #include "utilities/globalDefinitions.hpp" 39 40 bool NMT_track_callsite = false; 41 42 // walk all 'known' threads at NMT sync point, and collect their recorders 43 void SyncThreadRecorderClosure::do_thread(Thread* thread) { 44 assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required"); 45 if (thread->is_Java_thread()) { 46 JavaThread* javaThread = (JavaThread*)thread; 47 MemRecorder* recorder = javaThread->get_recorder(); 48 if (recorder != NULL) { 49 MemTracker::enqueue_pending_recorder(recorder); 50 javaThread->set_recorder(NULL); 51 } 52 } 53 _thread_count ++; 54 } 55 56 57 MemRecorder* volatile MemTracker::_global_recorder = NULL; 58 MemSnapshot* MemTracker::_snapshot = NULL; 59 MemBaseline MemTracker::_baseline; 60 Mutex* MemTracker::_query_lock = NULL; 61 MemRecorder* volatile MemTracker::_merge_pending_queue = NULL; 62 MemRecorder* volatile MemTracker::_pooled_recorders = NULL; 63 MemTrackWorker* MemTracker::_worker_thread = NULL; 64 int MemTracker::_sync_point_skip_count = 0; 65 MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off; 66 volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited; 67 MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none; 68 int MemTracker::_thread_count = 255; 69 volatile jint MemTracker::_pooled_recorder_count = 0; 70 volatile unsigned long MemTracker::_processing_generation = 0; 71 volatile bool MemTracker::_worker_thread_idle = false; 72 volatile jint MemTracker::_pending_op_count = 0; 73 volatile bool MemTracker::_slowdown_calling_thread = false; 74 debug_only(intx MemTracker::_main_thread_tid = 0;) 75 NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;) 76 77 void MemTracker::init_tracking_options(const char* option_line) { 78 _tracking_level = NMT_off; 79 if (strcmp(option_line, "=summary") == 0) { 80 _tracking_level = NMT_summary; 81 } else if (strcmp(option_line, "=detail") == 0) { 82 // detail relies on a stack-walking ability that may not 83 // be available depending on platform and/or compiler flags 84 if (PLATFORM_NMT_DETAIL_SUPPORTED) { 85 _tracking_level = NMT_detail; 86 } else { 87 jio_fprintf(defaultStream::error_stream(), 88 "NMT detail is not supported on this platform. Using NMT summary instead."); 89 _tracking_level = NMT_summary; 90 } 91 } else if (strcmp(option_line, "=off") != 0) { 92 vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL); 93 } 94 } 95 96 // first phase of bootstrapping, when VM is still in single-threaded mode. 97 void MemTracker::bootstrap_single_thread() { 98 if (_tracking_level > NMT_off) { 99 assert(_state == NMT_uninited, "wrong state"); 100 101 // NMT is not supported with UseMallocOnly is on. NMT can NOT 102 // handle the amount of malloc data without significantly impacting 103 // runtime performance when this flag is on. 104 if (UseMallocOnly) { 105 shutdown(NMT_use_malloc_only); 106 return; 107 } 108 109 _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock"); 110 if (_query_lock == NULL) { 111 shutdown(NMT_out_of_memory); 112 return; 113 } 114 115 debug_only(_main_thread_tid = os::current_thread_id();) 116 _state = NMT_bootstrapping_single_thread; 117 NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack()); 118 } 119 } 120 121 // second phase of bootstrapping, when VM is about to or already entered multi-theaded mode. 122 void MemTracker::bootstrap_multi_thread() { 123 if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) { 124 // create nmt lock for multi-thread execution 125 assert(_main_thread_tid == os::current_thread_id(), "wrong thread"); 126 _state = NMT_bootstrapping_multi_thread; 127 NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack()); 128 } 129 } 130 131 // fully start nmt 132 void MemTracker::start() { 133 // Native memory tracking is off from command line option 134 if (_tracking_level == NMT_off || shutdown_in_progress()) return; 135 136 assert(_main_thread_tid == os::current_thread_id(), "wrong thread"); 137 assert(_state == NMT_bootstrapping_multi_thread, "wrong state"); 138 139 _snapshot = new (std::nothrow)MemSnapshot(); 140 if (_snapshot != NULL) { 141 if (!_snapshot->out_of_memory() && start_worker(_snapshot)) { 142 _state = NMT_started; 143 NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack()); 144 return; 145 } 146 147 delete _snapshot; 148 _snapshot = NULL; 149 } 150 151 // fail to start native memory tracking, shut it down 152 shutdown(NMT_initialization); 153 } 154 155 /** 156 * Shutting down native memory tracking. 157 * We can not shutdown native memory tracking immediately, so we just 158 * setup shutdown pending flag, every native memory tracking component 159 * should orderly shut itself down. 160 * 161 * The shutdown sequences: 162 * 1. MemTracker::shutdown() sets MemTracker to shutdown pending state 163 * 2. Worker thread calls MemTracker::final_shutdown(), which transites 164 * MemTracker to final shutdown state. 165 * 3. At sync point, MemTracker does final cleanup, before sets memory 166 * tracking level to off to complete shutdown. 167 */ 168 void MemTracker::shutdown(ShutdownReason reason) { 169 if (_tracking_level == NMT_off) return; 170 171 if (_state <= NMT_bootstrapping_single_thread) { 172 // we still in single thread mode, there is not contention 173 _state = NMT_shutdown_pending; 174 _reason = reason; 175 } else { 176 // we want to know who initialized shutdown 177 if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending, 178 (jint*)&_state, (jint)NMT_started)) { 179 _reason = reason; 180 } 181 } 182 } 183 184 // final phase of shutdown 185 void MemTracker::final_shutdown() { 186 // delete all pending recorders and pooled recorders 187 delete_all_pending_recorders(); 188 delete_all_pooled_recorders(); 189 190 { 191 // shared baseline and snapshot are the only objects needed to 192 // create query results 193 MutexLockerEx locker(_query_lock, true); 194 // cleanup baseline data and snapshot 195 _baseline.clear(); 196 delete _snapshot; 197 _snapshot = NULL; 198 } 199 200 // shutdown shared decoder instance, since it is only 201 // used by native memory tracking so far. 202 Decoder::shutdown(); 203 204 MemTrackWorker* worker = NULL; 205 { 206 ThreadCritical tc; 207 // can not delete worker inside the thread critical 208 if (_worker_thread != NULL && Thread::current() == _worker_thread) { 209 worker = _worker_thread; 210 _worker_thread = NULL; 211 } 212 } 213 if (worker != NULL) { 214 delete worker; 215 } 216 _state = NMT_final_shutdown; 217 } 218 219 // delete all pooled recorders 220 void MemTracker::delete_all_pooled_recorders() { 221 // free all pooled recorders 222 MemRecorder* volatile cur_head = _pooled_recorders; 223 if (cur_head != NULL) { 224 MemRecorder* null_ptr = NULL; 225 while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, 226 (void*)&_pooled_recorders, (void*)cur_head)) { 227 cur_head = _pooled_recorders; 228 } 229 if (cur_head != NULL) { 230 delete cur_head; 231 _pooled_recorder_count = 0; 232 } 233 } 234 } 235 236 // delete all recorders in pending queue 237 void MemTracker::delete_all_pending_recorders() { 238 // free all pending recorders 239 MemRecorder* pending_head = get_pending_recorders(); 240 if (pending_head != NULL) { 241 delete pending_head; 242 } 243 } 244 245 /* 246 * retrieve per-thread recorder of specified thread. 247 * if thread == NULL, it means global recorder 248 */ 249 MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) { 250 if (shutdown_in_progress()) return NULL; 251 252 MemRecorder* rc; 253 if (thread == NULL) { 254 rc = _global_recorder; 255 } else { 256 rc = thread->get_recorder(); 257 } 258 259 if (rc != NULL && rc->is_full()) { 260 enqueue_pending_recorder(rc); 261 rc = NULL; 262 } 263 264 if (rc == NULL) { 265 rc = get_new_or_pooled_instance(); 266 if (thread == NULL) { 267 _global_recorder = rc; 268 } else { 269 thread->set_recorder(rc); 270 } 271 } 272 return rc; 273 } 274 275 /* 276 * get a per-thread recorder from pool, or create a new one if 277 * there is not one available. 278 */ 279 MemRecorder* MemTracker::get_new_or_pooled_instance() { 280 MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders); 281 if (cur_head == NULL) { 282 MemRecorder* rec = new (std::nothrow)MemRecorder(); 283 if (rec == NULL || rec->out_of_memory()) { 284 shutdown(NMT_out_of_memory); 285 if (rec != NULL) { 286 delete rec; 287 rec = NULL; 288 } 289 } 290 return rec; 291 } else { 292 MemRecorder* next_head = cur_head->next(); 293 if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders, 294 (void*)cur_head)) { 295 return get_new_or_pooled_instance(); 296 } 297 cur_head->set_next(NULL); 298 Atomic::dec(&_pooled_recorder_count); 299 cur_head->set_generation(); 300 return cur_head; 301 } 302 } 303 304 /* 305 * retrieve all recorders in pending queue, and empty the queue 306 */ 307 MemRecorder* MemTracker::get_pending_recorders() { 308 MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue); 309 MemRecorder* null_ptr = NULL; 310 while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue, 311 (void*)cur_head)) { 312 cur_head = const_cast<MemRecorder*>(_merge_pending_queue); 313 } 314 NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count)); 315 return cur_head; 316 } 317 318 /* 319 * release a recorder to recorder pool. 320 */ 321 void MemTracker::release_thread_recorder(MemRecorder* rec) { 322 assert(rec != NULL, "null recorder"); 323 // we don't want to pool too many recorders 324 rec->set_next(NULL); 325 if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) { 326 delete rec; 327 return; 328 } 329 330 rec->clear(); 331 MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders); 332 rec->set_next(cur_head); 333 while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders, 334 (void*)cur_head)) { 335 cur_head = const_cast<MemRecorder*>(_pooled_recorders); 336 rec->set_next(cur_head); 337 } 338 Atomic::inc(&_pooled_recorder_count); 339 } 340 341 // write a record to proper recorder. No lock can be taken from this method 342 // down. 343 void MemTracker::write_tracking_record(address addr, MEMFLAGS flags, 344 size_t size, jint seq, address pc, JavaThread* thread) { 345 346 MemRecorder* rc = get_thread_recorder(thread); 347 if (rc != NULL) { 348 rc->record(addr, flags, size, seq, pc); 349 } 350 } 351 352 /** 353 * enqueue a recorder to pending queue 354 */ 355 void MemTracker::enqueue_pending_recorder(MemRecorder* rec) { 356 assert(rec != NULL, "null recorder"); 357 358 // we are shutting down, so just delete it 359 if (shutdown_in_progress()) { 360 rec->set_next(NULL); 361 delete rec; 362 return; 363 } 364 365 MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue); 366 rec->set_next(cur_head); 367 while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue, 368 (void*)cur_head)) { 369 cur_head = const_cast<MemRecorder*>(_merge_pending_queue); 370 rec->set_next(cur_head); 371 } 372 NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);) 373 } 374 375 /* 376 * The method is called at global safepoint 377 * during it synchronization process. 378 * 1. enqueue all JavaThreads' per-thread recorders 379 * 2. enqueue global recorder 380 * 3. retrieve all pending recorders 381 * 4. reset global sequence number generator 382 * 5. call worker's sync 383 */ 384 #define MAX_SAFEPOINTS_TO_SKIP 128 385 #define SAFE_SEQUENCE_THRESHOLD 30 386 #define HIGH_GENERATION_THRESHOLD 60 387 #define MAX_RECORDER_THREAD_RATIO 30 388 389 void MemTracker::sync() { 390 assert(_tracking_level > NMT_off, "NMT is not enabled"); 391 assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required"); 392 393 // Some GC tests hit large number of safepoints in short period of time 394 // without meaningful activities. We should prevent going to 395 // sync point in these cases, which can potentially exhaust generation buffer. 396 // Here is the factots to determine if we should go into sync point: 397 // 1. not to overflow sequence number 398 // 2. if we are in danger to overflow generation buffer 399 // 3. how many safepoints we already skipped sync point 400 if (_state == NMT_started) { 401 // worker thread is not ready, no one can manage generation 402 // buffer, so skip this safepoint 403 if (_worker_thread == NULL) return; 404 405 if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) { 406 int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint; 407 int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS; 408 if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) { 409 _sync_point_skip_count ++; 410 return; 411 } 412 } 413 { 414 // This method is running at safepoint, with ThreadCritical lock, 415 // it should guarantee that NMT is fully sync-ed. 416 ThreadCritical tc; 417 418 // We can NOT execute NMT sync-point if there are pending tracking ops. 419 if (_pending_op_count == 0) { 420 SequenceGenerator::reset(); 421 _sync_point_skip_count = 0; 422 423 // walk all JavaThreads to collect recorders 424 SyncThreadRecorderClosure stc; 425 Threads::threads_do(&stc); 426 427 _thread_count = stc.get_thread_count(); 428 MemRecorder* pending_recorders = get_pending_recorders(); 429 430 if (_global_recorder != NULL) { 431 _global_recorder->set_next(pending_recorders); 432 pending_recorders = _global_recorder; 433 _global_recorder = NULL; 434 } 435 436 // see if NMT has too many outstanding recorder instances, it usually 437 // means that worker thread is lagging behind in processing them. 438 if (!AutoShutdownNMT) { 439 _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count); 440 } 441 442 // check _worker_thread with lock to avoid racing condition 443 if (_worker_thread != NULL) { 444 _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes()); 445 } 446 assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point"); 447 } else { 448 _sync_point_skip_count ++; 449 } 450 } 451 } 452 453 // now, it is the time to shut whole things off 454 if (_state == NMT_final_shutdown) { 455 // walk all JavaThreads to delete all recorders 456 SyncThreadRecorderClosure stc; 457 Threads::threads_do(&stc); 458 // delete global recorder 459 { 460 ThreadCritical tc; 461 if (_global_recorder != NULL) { 462 delete _global_recorder; 463 _global_recorder = NULL; 464 } 465 } 466 MemRecorder* pending_recorders = get_pending_recorders(); 467 if (pending_recorders != NULL) { 468 delete pending_recorders; 469 } 470 // try at a later sync point to ensure MemRecorder instance drops to zero to 471 // completely shutdown NMT 472 if (MemRecorder::_instance_count == 0) { 473 _state = NMT_shutdown; 474 _tracking_level = NMT_off; 475 } 476 } 477 } 478 479 /* 480 * Start worker thread. 481 */ 482 bool MemTracker::start_worker(MemSnapshot* snapshot) { 483 assert(_worker_thread == NULL && _snapshot != NULL, "Just Check"); 484 _worker_thread = new (std::nothrow) MemTrackWorker(snapshot); 485 if (_worker_thread == NULL) { 486 return false; 487 } else if (_worker_thread->has_error()) { 488 delete _worker_thread; 489 _worker_thread = NULL; 490 return false; 491 } 492 _worker_thread->start(); 493 return true; 494 } 495 496 /* 497 * We need to collect a JavaThread's per-thread recorder 498 * before it exits. 499 */ 500 void MemTracker::thread_exiting(JavaThread* thread) { 501 if (is_on()) { 502 MemRecorder* rec = thread->get_recorder(); 503 if (rec != NULL) { 504 enqueue_pending_recorder(rec); 505 thread->set_recorder(NULL); 506 } 507 } 508 } 509 510 // baseline current memory snapshot 511 bool MemTracker::baseline() { 512 MutexLocker lock(_query_lock); 513 MemSnapshot* snapshot = get_snapshot(); 514 if (snapshot != NULL) { 515 return _baseline.baseline(*snapshot, false); 516 } 517 return false; 518 } 519 520 // print memory usage from current snapshot 521 bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) { 522 MemBaseline baseline; 523 MutexLocker lock(_query_lock); 524 MemSnapshot* snapshot = get_snapshot(); 525 if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) { 526 BaselineReporter reporter(out, unit); 527 reporter.report_baseline(baseline, summary_only); 528 return true; 529 } 530 return false; 531 } 532 533 // Whitebox API for blocking until the current generation of NMT data has been merged 534 bool MemTracker::wbtest_wait_for_data_merge() { 535 // NMT can't be shutdown while we're holding _query_lock 536 MutexLocker lock(_query_lock); 537 assert(_worker_thread != NULL, "Invalid query"); 538 // the generation at query time, so NMT will spin till this generation is processed 539 unsigned long generation_at_query_time = SequenceGenerator::current_generation(); 540 unsigned long current_processing_generation = _processing_generation; 541 // if generation counter overflown 542 bool generation_overflown = (generation_at_query_time < current_processing_generation); 543 long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation; 544 // spin 545 while (!shutdown_in_progress()) { 546 if (!generation_overflown) { 547 if (current_processing_generation > generation_at_query_time) { 548 return true; 549 } 550 } else { 551 assert(generations_to_wrap >= 0, "Sanity check"); 552 long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation; 553 assert(current_generations_to_wrap >= 0, "Sanity check"); 554 // to overflow an unsigned long should take long time, so to_wrap check should be sufficient 555 if (current_generations_to_wrap > generations_to_wrap && 556 current_processing_generation > generation_at_query_time) { 557 return true; 558 } 559 } 560 561 // if worker thread is idle, but generation is not advancing, that means 562 // there is not safepoint to let NMT advance generation, force one. 563 if (_worker_thread_idle) { 564 VM_ForceSafepoint vfs; 565 VMThread::execute(&vfs); 566 } 567 MemSnapshot* snapshot = get_snapshot(); 568 if (snapshot == NULL) { 569 return false; 570 } 571 snapshot->wait(1000); 572 current_processing_generation = _processing_generation; 573 } 574 // We end up here if NMT is shutting down before our data has been merged 575 return false; 576 } 577 578 // compare memory usage between current snapshot and baseline 579 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) { 580 MutexLocker lock(_query_lock); 581 if (_baseline.baselined()) { 582 MemBaseline baseline; 583 MemSnapshot* snapshot = get_snapshot(); 584 if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) { 585 BaselineReporter reporter(out, unit); 586 reporter.diff_baselines(baseline, _baseline, summary_only); 587 return true; 588 } 589 } 590 return false; 591 } 592 593 #ifndef PRODUCT 594 void MemTracker::walk_stack(int toSkip, char* buf, int len) { 595 int cur_len = 0; 596 char tmp[1024]; 597 address pc; 598 599 while (cur_len < len) { 600 pc = os::get_caller_pc(toSkip + 1); 601 if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) { 602 jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp); 603 cur_len = (int)strlen(buf); 604 } else { 605 buf[cur_len] = '\0'; 606 break; 607 } 608 toSkip ++; 609 } 610 } 611 612 void MemTracker::print_tracker_stats(outputStream* st) { 613 st->print_cr("\nMemory Tracker Stats:"); 614 st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num()); 615 st->print_cr("\tthead count = %d", _thread_count); 616 st->print_cr("\tArena instance = %d", Arena::_instance_count); 617 st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count); 618 st->print_cr("\tqueued recorder count = %d", _pending_recorder_count); 619 st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count); 620 if (_worker_thread != NULL) { 621 st->print_cr("\tWorker thread:"); 622 st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count); 623 st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders()); 624 st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count); 625 } else { 626 st->print_cr("\tWorker thread is not started"); 627 } 628 st->print_cr(" "); 629 630 if (_snapshot != NULL) { 631 _snapshot->print_snapshot_stats(st); 632 } else { 633 st->print_cr("No snapshot"); 634 } 635 } 636 #endif 637 638 639 // Tracker Implementation 640 641 /* 642 * Create a tracker. 643 * This is a fairly complicated constructor, as it has to make two important decisions: 644 * 1) Does it need to take ThreadCritical lock to write tracking record 645 * 2) Does it need to pre-reserve a sequence number for the tracking record 646 * 647 * The rules to determine if ThreadCritical is needed: 648 * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM 649 * still in single thread mode. 650 * 2. For all threads other than JavaThread, ThreadCritical is needed 651 * to write to recorders to global recorder. 652 * 3. For JavaThreads that are no longer visible by safepoint, also 653 * need to take ThreadCritical and records are written to global 654 * recorders, since these threads are NOT walked by Threads.do_thread(). 655 * 4. JavaThreads that are running in safepoint-safe states do not stop 656 * for safepoints, ThreadCritical lock should be taken to write 657 * memory records. 658 * 5. JavaThreads that are running in VM state do not need any lock and 659 * records are written to per-thread recorders. 660 * 6. For a thread has yet to attach VM 'Thread', they need to take 661 * ThreadCritical to write to global recorder. 662 * 663 * The memory operations that need pre-reserve sequence numbers: 664 * The memory operations that "release" memory blocks and the 665 * operations can fail, need to pre-reserve sequence number. They 666 * are realloc, uncommit and release. 667 * 668 * The reason for pre-reserve sequence number, is to prevent race condition: 669 * Thread 1 Thread 2 670 * <release> 671 * <allocate> 672 * <write allocate record> 673 * <write release record> 674 * if Thread 2 happens to obtain the memory address Thread 1 just released, 675 * then NMT can mistakenly report the memory is free. 676 * 677 * Noticeably, free() does not need pre-reserve sequence number, because the call 678 * does not fail, so we can alway write "release" record before the memory is actaully 679 * freed. 680 * 681 * For realloc, uncommit and release, following coding pattern should be used: 682 * 683 * MemTracker::Tracker tkr = MemTracker::get_realloc_tracker(); 684 * ptr = ::realloc(...); 685 * if (ptr == NULL) { 686 * tkr.record(...) 687 * } else { 688 * tkr.discard(); 689 * } 690 * 691 * MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker(); 692 * if (uncommit(...)) { 693 * tkr.record(...); 694 * } else { 695 * tkr.discard(); 696 * } 697 * 698 * MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); 699 * if (release(...)) { 700 * tkr.record(...); 701 * } else { 702 * tkr.discard(); 703 * } 704 * 705 * Since pre-reserved sequence number is only good for the generation that it is acquired, 706 * when there is pending Tracker that reserved sequence number, NMT sync-point has 707 * to be skipped to prevent from advancing generation. This is done by inc and dec 708 * MemTracker::_pending_op_count, when MemTracker::_pending_op_count > 0, NMT sync-point is skipped. 709 * Not all pre-reservation of sequence number will increment pending op count. For JavaThreads 710 * that honor safepoints, safepoint can not occur during the memory operations, so the 711 * pre-reserved sequence number won't cross the generation boundry. 712 */ 713 MemTracker::Tracker::Tracker(MemoryOperation op, Thread* thr) { 714 _op = NoOp; 715 _seq = 0; 716 if (MemTracker::is_on()) { 717 _java_thread = NULL; 718 _op = op; 719 720 // figure out if ThreadCritical lock is needed to write this operation 721 // to MemTracker 722 if (MemTracker::is_single_threaded_bootstrap()) { 723 thr = NULL; 724 } else if (thr == NULL) { 725 // don't use Thread::current(), since it is possible that 726 // the calling thread has yet to attach to VM 'Thread', 727 // which will result assertion failure 728 thr = ThreadLocalStorage::thread(); 729 } 730 731 if (thr != NULL) { 732 // Check NMT load 733 MemTracker::check_NMT_load(thr); 734 735 if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) { 736 _java_thread = (JavaThread*)thr; 737 JavaThreadState state = _java_thread->thread_state(); 738 // JavaThreads that are safepoint safe, can run through safepoint, 739 // so ThreadCritical is needed to ensure no threads at safepoint create 740 // new records while the records are being gathered and the sequence number is changing 741 _need_thread_critical_lock = 742 SafepointSynchronize::safepoint_safe(_java_thread, state); 743 } else { 744 _need_thread_critical_lock = true; 745 } 746 } else { 747 _need_thread_critical_lock 748 = !MemTracker::is_single_threaded_bootstrap(); 749 } 750 751 // see if we need to pre-reserve sequence number for this operation 752 if (_op == Realloc || _op == Uncommit || _op == Release) { 753 if (_need_thread_critical_lock) { 754 ThreadCritical tc; 755 MemTracker::inc_pending_op_count(); 756 _seq = SequenceGenerator::next(); 757 } else { 758 // for the threads that honor safepoints, no safepoint can occur 759 // during the lifespan of tracker, so we don't need to increase 760 // pending op count. 761 _seq = SequenceGenerator::next(); 762 } 763 } 764 } 765 } 766 767 void MemTracker::Tracker::discard() { 768 if (MemTracker::is_on() && _seq != 0) { 769 if (_need_thread_critical_lock) { 770 ThreadCritical tc; 771 MemTracker::dec_pending_op_count(); 772 } 773 _seq = 0; 774 } 775 } 776 777 778 void MemTracker::Tracker::record(address old_addr, address new_addr, size_t size, 779 MEMFLAGS flags, address pc) { 780 assert(old_addr != NULL && new_addr != NULL, "Sanity check"); 781 assert(_op == Realloc || _op == NoOp, "Wrong call"); 782 if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) { 783 assert(_seq > 0, "Need pre-reserve sequence number"); 784 if (_need_thread_critical_lock) { 785 ThreadCritical tc; 786 // free old address, use pre-reserved sequence number 787 MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(), 788 0, _seq, pc, _java_thread); 789 MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(), 790 size, SequenceGenerator::next(), pc, _java_thread); 791 // decrement MemTracker pending_op_count 792 MemTracker::dec_pending_op_count(); 793 } else { 794 // free old address, use pre-reserved sequence number 795 MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(), 796 0, _seq, pc, _java_thread); 797 MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(), 798 size, SequenceGenerator::next(), pc, _java_thread); 799 } 800 _seq = 0; 801 } 802 } 803 804 void MemTracker::Tracker::record(address addr, size_t size, MEMFLAGS flags, address pc) { 805 // OOM already? 806 if (addr == NULL) return; 807 808 if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) { 809 bool pre_reserved_seq = (_seq != 0); 810 address pc = CALLER_CALLER_PC; 811 MEMFLAGS orig_flags = flags; 812 813 // or the tagging flags 814 switch(_op) { 815 case Malloc: 816 flags |= MemPointerRecord::malloc_tag(); 817 break; 818 case Free: 819 flags = MemPointerRecord::free_tag(); 820 break; 821 case Realloc: 822 fatal("Use the other Tracker::record()"); 823 break; 824 case Reserve: 825 case ReserveAndCommit: 826 flags |= MemPointerRecord::virtual_memory_reserve_tag(); 827 break; 828 case Commit: 829 flags = MemPointerRecord::virtual_memory_commit_tag(); 830 break; 831 case Type: 832 flags |= MemPointerRecord::virtual_memory_type_tag(); 833 break; 834 case Uncommit: 835 assert(pre_reserved_seq, "Need pre-reserve sequence number"); 836 flags = MemPointerRecord::virtual_memory_uncommit_tag(); 837 break; 838 case Release: 839 assert(pre_reserved_seq, "Need pre-reserve sequence number"); 840 flags = MemPointerRecord::virtual_memory_release_tag(); 841 break; 842 case ArenaSize: 843 // a bit of hack here, add a small postive offset to arena 844 // address for its size record, so the size record is sorted 845 // right after arena record. 846 flags = MemPointerRecord::arena_size_tag(); 847 addr += sizeof(void*); 848 break; 849 case StackRelease: 850 flags = MemPointerRecord::virtual_memory_release_tag(); 851 break; 852 default: 853 ShouldNotReachHere(); 854 } 855 856 // write memory tracking record 857 if (_need_thread_critical_lock) { 858 ThreadCritical tc; 859 if (_seq == 0) _seq = SequenceGenerator::next(); 860 MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread); 861 if (_op == ReserveAndCommit) { 862 MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(), 863 size, SequenceGenerator::next(), pc, _java_thread); 864 } 865 if (pre_reserved_seq) MemTracker::dec_pending_op_count(); 866 } else { 867 if (_seq == 0) _seq = SequenceGenerator::next(); 868 MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread); 869 if (_op == ReserveAndCommit) { 870 MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(), 871 size, SequenceGenerator::next(), pc, _java_thread); 872 } 873 } 874 _seq = 0; 875 } 876 } 877