/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "oops/instanceKlass.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memPtr.hpp"
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
#include "utilities/decoder.hpp"
#include "utilities/globalDefinitions.hpp"

bool NMT_track_callsite = false;

// walk all 'known' threads at NMT sync point, and collect their recorders
void SyncThreadRecorderClosure::do_thread(Thread* thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
  if (thread->is_Java_thread()) {
    JavaThread* javaThread = (JavaThread*)thread;
    MemRecorder* recorder = javaThread->get_recorder();
    if (recorder != NULL) {
      MemTracker::enqueue_pending_recorder(recorder);
      javaThread->set_recorder(NULL);
    }
  }
  _thread_count ++;
}


MemRecorder* volatile           MemTracker::_global_recorder = NULL;
MemSnapshot*                    MemTracker::_snapshot = NULL;
MemBaseline                     MemTracker::_baseline;
Mutex*                          MemTracker::_query_lock = NULL;
MemRecorder* volatile           MemTracker::_merge_pending_queue = NULL;
MemRecorder* volatile           MemTracker::_pooled_recorders = NULL;
MemTrackWorker*                 MemTracker::_worker_thread = NULL;
int                             MemTracker::_sync_point_skip_count = 0;
MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
int                             MemTracker::_thread_count = 255;
volatile jint                   MemTracker::_pooled_recorder_count = 0;
volatile unsigned long          MemTracker::_processing_generation = 0;
volatile bool                   MemTracker::_worker_thread_idle = false;
volatile jint                   MemTracker::_pending_op_count = 0;
volatile bool                   MemTracker::_slowdown_calling_thread = false;
debug_only(intx                 MemTracker::_main_thread_tid = 0;)
NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)

void MemTracker::init_tracking_options(const char* option_line) {
  _tracking_level = NMT_off;
  if (strcmp(option_line, "=summary") == 0) {
    _tracking_level = NMT_summary;
  } else if (strcmp(option_line, "=detail") == 0) {
    _tracking_level = NMT_detail;
  } else if (strcmp(option_line, "=off") != 0) {
    vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
  }
}
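
// Illustrative mapping (comment only): the option tail passed in from
// -XX:NativeMemoryTracking is matched verbatim by the function above:
//   "=summary" -> NMT_summary
//   "=detail"  -> NMT_detail
//   "=off"     -> NMT_off (the default assigned on entry)
// Anything else aborts VM initialization with the syntax error above.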
// first phase of bootstrapping, when VM is still in single-threaded mode.
void MemTracker::bootstrap_single_thread() {
  if (_tracking_level > NMT_off) {
    assert(_state == NMT_uninited, "wrong state");

    // NMT is not supported when UseMallocOnly is on. NMT can NOT
    // handle the amount of malloc data produced under this flag
    // without significantly impacting runtime performance.
    if (UseMallocOnly) {
      shutdown(NMT_use_malloc_only);
      return;
    }

    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
    if (_query_lock == NULL) {
      shutdown(NMT_out_of_memory);
      return;
    }

    debug_only(_main_thread_tid = os::current_thread_id();)
    _state = NMT_bootstrapping_single_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}

// second phase of bootstrapping, when VM is about to enter, or has already
// entered, multi-threaded mode.
void MemTracker::bootstrap_multi_thread() {
  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
    // create nmt lock for multi-thread execution
    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
    _state = NMT_bootstrapping_multi_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}

// fully start nmt
void MemTracker::start() {
  // Native memory tracking is off from command line option
  if (_tracking_level == NMT_off || shutdown_in_progress()) return;

  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");

  _snapshot = new (std::nothrow)MemSnapshot();
  if (_snapshot != NULL) {
    if (!_snapshot->out_of_memory() && start_worker(_snapshot)) {
      _state = NMT_started;
      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
      return;
    }

    delete _snapshot;
    _snapshot = NULL;
  }

  // failed to start native memory tracking; shut it down
  shutdown(NMT_initialization);
}
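
// Putting the phases together (a sketch; assumes no shutdown request
// intervenes), a successful startup walks the states:
//   NMT_uninited
//     -> NMT_bootstrapping_single_thread  (bootstrap_single_thread)
//     -> NMT_bootstrapping_multi_thread   (bootstrap_multi_thread)
//     -> NMT_started                      (start)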
/**
 * Shut down native memory tracking.
 * We can not shut down native memory tracking immediately, so we just
 * set a shutdown-pending flag; every native memory tracking component
 * should then shut itself down in an orderly fashion.
 *
 * The shutdown sequence:
 * 1. MemTracker::shutdown() sets MemTracker to shutdown-pending state.
 * 2. Worker thread calls MemTracker::final_shutdown(), which transitions
 *    MemTracker to the final shutdown state.
 * 3. At sync point, MemTracker does final cleanup before setting the memory
 *    tracking level to off to complete the shutdown.
 */
void MemTracker::shutdown(ShutdownReason reason) {
  if (_tracking_level == NMT_off) return;

  if (_state <= NMT_bootstrapping_single_thread) {
    // we are still in single-thread mode; there is no contention
    _state = NMT_shutdown_pending;
    _reason = reason;
  } else {
    // we want to know who initiated the shutdown
    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
                                       (jint*)&_state, (jint)NMT_started)) {
      _reason = reason;
    }
  }
}

// final phase of shutdown
void MemTracker::final_shutdown() {
  // delete all pending recorders and pooled recorders
  delete_all_pending_recorders();
  delete_all_pooled_recorders();

  {
    // shared baseline and snapshot are the only objects needed to
    // create query results
    MutexLockerEx locker(_query_lock, true);
    // cleanup baseline data and snapshot
    _baseline.clear();
    delete _snapshot;
    _snapshot = NULL;
  }

  // shutdown shared decoder instance, since it is only
  // used by native memory tracking so far.
  Decoder::shutdown();

  MemTrackWorker* worker = NULL;
  {
    ThreadCritical tc;
    // can not delete the worker inside the thread critical
    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
      worker = _worker_thread;
      _worker_thread = NULL;
    }
  }
  if (worker != NULL) {
    delete worker;
  }
  _state = NMT_final_shutdown;
}

// delete all pooled recorders
void MemTracker::delete_all_pooled_recorders() {
  // free all pooled recorders
  MemRecorder* volatile cur_head = _pooled_recorders;
  if (cur_head != NULL) {
    MemRecorder* null_ptr = NULL;
    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
      (void*)&_pooled_recorders, (void*)cur_head)) {
      cur_head = _pooled_recorders;
    }
    if (cur_head != NULL) {
      delete cur_head;
      _pooled_recorder_count = 0;
    }
  }
}

// delete all recorders in pending queue
void MemTracker::delete_all_pending_recorders() {
  // free all pending recorders
  MemRecorder* pending_head = get_pending_recorders();
  if (pending_head != NULL) {
    delete pending_head;
  }
}

/*
 * retrieve the per-thread recorder of the specified thread.
 * if thread == NULL, it means the global recorder
 */
MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
  if (shutdown_in_progress()) return NULL;

  MemRecorder* rc;
  if (thread == NULL) {
    rc = _global_recorder;
  } else {
    rc = thread->get_recorder();
  }

  // a full recorder is enqueued for merging and replaced with a fresh one
  if (rc != NULL && rc->is_full()) {
    enqueue_pending_recorder(rc);
    rc = NULL;
  }

  if (rc == NULL) {
    rc = get_new_or_pooled_instance();
    if (thread == NULL) {
      _global_recorder = rc;
    } else {
      thread->set_recorder(rc);
    }
  }
  return rc;
}
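
// Recorder life cycle (sketch): a thread obtains a recorder here; once the
// recorder fills up it is enqueued on the merge-pending queue and replaced
// with a pooled (or newly allocated) instance; after the worker thread has
// merged its records, the recorder is returned to the pool via
// release_thread_recorder().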
/*
 * get a per-thread recorder from the pool, or create a new one if
 * none is available.
 */
MemRecorder* MemTracker::get_new_or_pooled_instance() {
  MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
  if (cur_head == NULL) {
    MemRecorder* rec = new (std::nothrow)MemRecorder();
    if (rec == NULL || rec->out_of_memory()) {
      shutdown(NMT_out_of_memory);
      if (rec != NULL) {
        delete rec;
        rec = NULL;
      }
    }
    return rec;
  } else {
    MemRecorder* next_head = cur_head->next();
    if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
      (void*)cur_head)) {
      // lost the race to pop the head; retry
      return get_new_or_pooled_instance();
    }
    cur_head->set_next(NULL);
    Atomic::dec(&_pooled_recorder_count);
    cur_head->set_generation();
    return cur_head;
  }
}

/*
 * retrieve all recorders in the pending queue, and empty the queue
 */
MemRecorder* MemTracker::get_pending_recorders() {
  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  MemRecorder* null_ptr = NULL;
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  }
  NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
  return cur_head;
}

/*
 * release a recorder to the recorder pool.
 */
void MemTracker::release_thread_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");
  rec->set_next(NULL);
  // we don't want to pool too many recorders
  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
    delete rec;
    return;
  }

  rec->clear();
  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
    rec->set_next(cur_head);
  }
  Atomic::inc(&_pooled_recorder_count);
}

// write a record to the proper recorder. No lock can be taken from this
// method down.
void MemTracker::write_tracking_record(address addr, MEMFLAGS flags,
    size_t size, jint seq, address pc, JavaThread* thread) {

  MemRecorder* rc = get_thread_recorder(thread);
  if (rc != NULL) {
    rc->record(addr, flags, size, seq, pc);
  }
}

/**
 * enqueue a recorder to the pending queue
 */
void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");

  // we are shutting down, so just delete it
  if (shutdown_in_progress()) {
    rec->set_next(NULL);
    delete rec;
    return;
  }

  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
    rec->set_next(cur_head);
  }
  NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);)
}
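
// Both _pooled_recorders and _merge_pending_queue are maintained as lock-free
// LIFO lists (Treiber stacks). The push pattern used above is, in sketch form:
//
//   rec->set_next(cur_head);
//   while (CAS(&list_head, rec) does not observe cur_head) { // lost the race
//     cur_head = list_head;      // re-read the new head
//     rec->set_next(cur_head);   // re-link and retry
//   }
//
// and draining a list is a single CAS of the head to NULL, which hands the
// entire chain to the caller.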
/*
 * this method is called at a global safepoint
 * during its synchronization process.
 *   1. enqueue all JavaThreads' per-thread recorders
 *   2. enqueue global recorder
 *   3. retrieve all pending recorders
 *   4. reset global sequence number generator
 *   5. call worker's sync
 */
#define MAX_SAFEPOINTS_TO_SKIP     128
#define SAFE_SEQUENCE_THRESHOLD    30
#define HIGH_GENERATION_THRESHOLD  60
#define MAX_RECORDER_THREAD_RATIO  30

void MemTracker::sync() {
  assert(_tracking_level > NMT_off, "NMT is not enabled");
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");

  // Some GC tests hit a large number of safepoints in a short period of time
  // without meaningful activity. We should avoid going into the sync point in
  // those cases, which could otherwise exhaust the generation buffer.
  // These are the factors that determine whether we go into the sync point:
  //  1. do not overflow the sequence number
  //  2. whether we are in danger of overflowing the generation buffer
  //  3. how many safepoints have already skipped the sync point
  if (_state == NMT_started) {
    // worker thread is not ready, no one can manage generation
    // buffer, so skip this safepoint
    if (_worker_thread == NULL) return;

    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
        _sync_point_skip_count ++;
        return;
      }
    }
    {
      // This method runs at a safepoint, holding the ThreadCritical lock,
      // which should guarantee that NMT is fully sync-ed.
      ThreadCritical tc;

      // We can NOT execute the NMT sync-point if there are pending tracking ops.
      if (_pending_op_count == 0) {
        SequenceGenerator::reset();
        _sync_point_skip_count = 0;

        // walk all JavaThreads to collect recorders
        SyncThreadRecorderClosure stc;
        Threads::threads_do(&stc);

        _thread_count = stc.get_thread_count();
        MemRecorder* pending_recorders = get_pending_recorders();

        if (_global_recorder != NULL) {
          _global_recorder->set_next(pending_recorders);
          pending_recorders = _global_recorder;
          _global_recorder = NULL;
        }

        // see if NMT has too many outstanding recorder instances; it usually
        // means that the worker thread is lagging behind in processing them.
        if (!AutoShutdownNMT) {
          _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
        }

        // check _worker_thread with lock to avoid a race condition
        if (_worker_thread != NULL) {
          _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
        }

        assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
      } else {
        _sync_point_skip_count ++;
      }
    }
  }

  // now it is time to shut the whole thing down
  if (_state == NMT_final_shutdown) {
    // walk all JavaThreads to delete all recorders
    SyncThreadRecorderClosure stc;
    Threads::threads_do(&stc);
    // delete global recorder
    {
      ThreadCritical tc;
      if (_global_recorder != NULL) {
        delete _global_recorder;
        _global_recorder = NULL;
      }
    }
    MemRecorder* pending_recorders = get_pending_recorders();
    if (pending_recorders != NULL) {
      delete pending_recorders;
    }
    // only complete the shutdown once the MemRecorder instance count has
    // dropped to zero; otherwise retry at a later sync point
    if (MemRecorder::_instance_count == 0) {
      _state = NMT_shutdown;
      _tracking_level = NMT_off;
    }
  }
}
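
// For example (illustrative numbers only): with the thresholds above, a
// safepoint skips the sync point when less than 30% of the sequence number
// space is in use but 60% or more of the generation buffers are occupied --
// sequence numbers are not yet at risk while the worker thread is clearly
// behind -- and at most 128 consecutive safepoints may be skipped this way.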
/*
 * Start worker thread.
 */
bool MemTracker::start_worker(MemSnapshot* snapshot) {
  assert(_worker_thread == NULL && _snapshot != NULL, "Just Check");
  _worker_thread = new (std::nothrow) MemTrackWorker(snapshot);
  if (_worker_thread == NULL) {
    return false;
  } else if (_worker_thread->has_error()) {
    delete _worker_thread;
    _worker_thread = NULL;
    return false;
  }
  _worker_thread->start();
  return true;
}

/*
 * We need to collect a JavaThread's per-thread recorder
 * before it exits.
 */
void MemTracker::thread_exiting(JavaThread* thread) {
  if (is_on()) {
    MemRecorder* rec = thread->get_recorder();
    if (rec != NULL) {
      enqueue_pending_recorder(rec);
      thread->set_recorder(NULL);
    }
  }
}

// baseline current memory snapshot
bool MemTracker::baseline() {
  MutexLocker lock(_query_lock);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL) {
    return _baseline.baseline(*snapshot, false);
  }
  return false;
}

// print memory usage from current snapshot
bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MemBaseline  baseline;
  MutexLocker  lock(_query_lock);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
    BaselineReporter reporter(out, unit);
    reporter.report_baseline(baseline, summary_only);
    return true;
  }
  return false;
}
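
// These query entry points back the NMT diagnostic commands; for example
// (assuming the standard jcmd bindings for native memory tracking):
//   jcmd <pid> VM.native_memory baseline
//   jcmd <pid> VM.native_memory summary
//   jcmd <pid> VM.native_memory summary.diff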
// Whitebox API for blocking until the current generation of NMT data has
// been merged
bool MemTracker::wbtest_wait_for_data_merge() {
  // NMT can't be shut down while we're holding _query_lock
  MutexLocker lock(_query_lock);
  assert(_worker_thread != NULL, "Invalid query");
  // the generation at query time; NMT will spin until this generation is
  // processed
  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
  unsigned long current_processing_generation = _processing_generation;
  // whether the generation counter has overflowed
  bool generation_overflown = (generation_at_query_time < current_processing_generation);
  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
  // spin
  while (!shutdown_in_progress()) {
    if (!generation_overflown) {
      if (current_processing_generation > generation_at_query_time) {
        return true;
      }
    } else {
      assert(generations_to_wrap >= 0, "Sanity check");
      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
      assert(current_generations_to_wrap >= 0, "Sanity check");
      // overflowing an unsigned long should take a long time, so the to_wrap
      // check should be sufficient
      if (current_generations_to_wrap > generations_to_wrap &&
          current_processing_generation > generation_at_query_time) {
        return true;
      }
    }

    // if the worker thread is idle but the generation is not advancing, there
    // is no safepoint to let NMT advance its generation; force one.
    if (_worker_thread_idle) {
      VM_ForceSafepoint vfs;
      VMThread::execute(&vfs);
    }
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot == NULL) {
      return false;
    }
    snapshot->wait(1000);
    current_processing_generation = _processing_generation;
  }
  // We end up here if NMT is shutting down before our data has been merged
  return false;
}

// compare memory usage between current snapshot and baseline
bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MutexLocker lock(_query_lock);
  if (_baseline.baselined()) {
    MemBaseline baseline;
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
      BaselineReporter reporter(out, unit);
      reporter.diff_baselines(baseline, _baseline, summary_only);
      return true;
    }
  }
  return false;
}

#ifndef PRODUCT
void MemTracker::walk_stack(int toSkip, char* buf, int len) {
  int cur_len = 0;
  char tmp[1024];
  address pc;

  while (cur_len < len) {
    pc = os::get_caller_pc(toSkip + 1);
    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
      cur_len = (int)strlen(buf);
    } else {
      buf[cur_len] = '\0';
      break;
    }
    toSkip ++;
  }
}

void MemTracker::print_tracker_stats(outputStream* st) {
  st->print_cr("\nMemory Tracker Stats:");
  st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
  st->print_cr("\tthread count = %d", _thread_count);
  st->print_cr("\tArena instance = %d", Arena::_instance_count);
  st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
  st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
  st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
  if (_worker_thread != NULL) {
    st->print_cr("\tWorker thread:");
    st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
    st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
    st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
  } else {
    st->print_cr("\tWorker thread is not started");
  }
  st->print_cr(" ");

  if (_snapshot != NULL) {
    _snapshot->print_snapshot_stats(st);
  } else {
    st->print_cr("No snapshot");
  }
}
#endif
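
// (The two routines in the #ifndef PRODUCT block above are debug-build
// diagnostics: walk_stack renders one resolved caller frame per line into
// the supplied buffer, and print_tracker_stats dumps the tracker's internal
// counters for ad-hoc debugging.)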


// NMTTrackOp Implementation

/*
 * Create an NMT tracking op.
 * This is a fairly complicated constructor, as it has to make two important
 * decisions:
 *   1) Does it need to take the ThreadCritical lock to write the tracking
 *      record?
 *   2) Does it need to pre-reserve a sequence number for the tracking record?
 *
 * The rules to determine if ThreadCritical is needed:
 * 1. When NMT is in single-threaded bootstrapping mode, no lock is needed as
 *    the VM is still in single-threaded mode.
 * 2. For all threads other than JavaThread, ThreadCritical is needed
 *    to write records to the global recorder.
 * 3. For JavaThreads that are no longer visible to safepoints, ThreadCritical
 *    is also needed and records are written to the global recorder, since
 *    these threads are NOT walked by Threads::threads_do().
 * 4. JavaThreads that are running in safepoint-safe states do not stop
 *    for safepoints, so the ThreadCritical lock should be taken to write
 *    memory records.
 * 5. JavaThreads that are running in VM state do not need any lock and
 *    records are written to per-thread recorders.
 * 6. Threads that have yet to attach a VM 'Thread' need to take
 *    ThreadCritical to write to the global recorder.
 *
 * The memory operations that need pre-reserved sequence numbers:
 *   The memory operations that "release" memory blocks and can fail need to
 *   pre-reserve a sequence number. They are realloc, uncommit and release.
 *
 * The reason for pre-reserving a sequence number is to prevent a race
 * condition:
 *    Thread 1                      Thread 2
 *    <release>
 *                                  <allocate>
 *                                  <write allocate record>
 *   <write release record>
 * If Thread 2 happens to obtain the memory address Thread 1 just released,
 * then NMT can mistakenly report the memory as free.
 *
 * Notably, free() does not need to pre-reserve a sequence number, because the
 * call can not fail, so we can always write the "release" record before the
 * memory is actually freed.
 *
 * For realloc, uncommit and release, the following coding pattern should be
 * used:
 *
 *   NMTTrackOp op(ReallocOp);
 *   ptr = ::realloc(...);
 *   if (ptr == NULL) {
 *     op.abort_op();
 *   } else {
 *     op.execute_op(...);
 *   }
 *
 *   NMTTrackOp op(UncommitOp);
 *   if (!uncommit(...)) {
 *     op.abort_op();
 *   } else {
 *     op.execute_op(....);
 *   }
 *
 *   NMTTrackOp op(ReleaseOp);
 *   if (!release(...)) {
 *     op.abort_op();
 *   } else {
 *     op.execute_op(....);
 *   }
 *
 * Since a pre-reserved sequence number is only good for the generation in
 * which it was acquired, the NMT sync-point has to be skipped while there is
 * a pending NMTTrackOp holding a reserved sequence number, to prevent the
 * generation from advancing. This is done by incrementing and decrementing
 * MemTracker::_pending_op_count; while MemTracker::_pending_op_count > 0, the
 * NMT sync-point is skipped.
 * Not every pre-reservation of a sequence number increments the pending op
 * count: for JavaThreads that honor safepoints, a safepoint can not occur
 * during the memory operation, so the pre-reserved sequence number won't
 * cross a generation boundary.
 */
NMTTrackOp::NMTTrackOp(NMTMemoryOps op, Thread* thr) {
  _op = NoOp;
  _seq = 0;
  if (MemTracker::is_on()) {
    _java_thread = NULL;
    _op = op;

    // figure out if the ThreadCritical lock is needed to write this operation
    // to MemTracker
    if (MemTracker::is_single_threaded_bootstrap()) {
      thr = NULL;
    } else if (thr == NULL) {
      // don't use Thread::current(), since it is possible that
      // the calling thread has yet to attach to the VM 'Thread',
      // which would result in an assertion failure
      thr = ThreadLocalStorage::thread();
    }

    if (thr != NULL) {
      // Check NMT load
      MemTracker::check_NMT_load(thr);

      if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) {
        _java_thread = (JavaThread*)thr;
        JavaThreadState state = _java_thread->thread_state();
        // JavaThreads that are safepoint-safe can run through a safepoint,
        // so ThreadCritical is needed to ensure that no thread at a safepoint
        // creates new records while the records are being gathered and the
        // sequence number is changing
        _need_thread_critical_lock =
          SafepointSynchronize::safepoint_safe(_java_thread, state);
      } else {
        _need_thread_critical_lock = true;
      }
    } else {
      _need_thread_critical_lock
        = !MemTracker::is_single_threaded_bootstrap();
    }

    // see if we need to pre-reserve a sequence number for this operation
    switch(_op) {
      case MallocOp:
      case FreeOp:
      case ReserveOp:
      case CommitOp:
      case ReserveAndCommitOp:
      case TypeOp:
      case ArenaSizeOp:
      case StackReleaseOp:
        // we don't need to pre-reserve a sequence number
        // for the above ops
        _seq = 0;
        break;
      case ReallocOp:
      case UncommitOp:
      case ReleaseOp: {
        if (_need_thread_critical_lock) {
          ThreadCritical tc;
          MemTracker::inc_pending_op_count();
          _seq = SequenceGenerator::next();
        } else {
          _seq = SequenceGenerator::next();
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
}

void NMTTrackOp::abort_op() {
  if (MemTracker::is_on() && _seq != 0 && _need_thread_critical_lock) {
    ThreadCritical tc;
    MemTracker::dec_pending_op_count();
  }
}


void NMTTrackOp::execute_op(address old_addr, address new_addr, size_t size,
    MEMFLAGS flags, address pc) {
  assert(old_addr != NULL && new_addr != NULL, "Sanity check");
  assert(_op == ReallocOp || _op == NoOp, "Wrong call");
  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
    assert(_seq > 0, "Need pre-reserved sequence number");
    if (_need_thread_critical_lock) {
      ThreadCritical tc;
      // free old address, use pre-reserved sequence number
      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
          0, _seq, pc, _java_thread);
      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
          size, SequenceGenerator::next(), pc, _java_thread);
      // decrement MemTracker pending op count
      MemTracker::dec_pending_op_count();
    } else {
      // free old address, use pre-reserved sequence number
      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
          0, _seq, pc, _java_thread);
      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
          size, SequenceGenerator::next(), pc, _java_thread);
    }
  }
}
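
// Note the ordering above: the free record carries the pre-reserved (older)
// sequence number while the malloc record gets a fresh one, so a release of
// an address always sorts before a re-allocation of the same address -- which
// is exactly the race the pre-reservation scheme is designed to close.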
void NMTTrackOp::execute_op(address addr, size_t size, MEMFLAGS flags, address pc) {
  assert(addr != NULL, "Sanity check");
  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
    bool pre_reserved_seq = (_seq != 0);
    MEMFLAGS orig_flags = flags;

    switch(_op) {
      case MallocOp:
        flags |= MemPointerRecord::malloc_tag();
        break;
      case FreeOp:
        flags = MemPointerRecord::free_tag();
        break;
      case ReallocOp:
        fatal("Use the other NMTTrackOp::execute_op()");
        break;
      case ReserveOp:
      case ReserveAndCommitOp:
        flags |= MemPointerRecord::virtual_memory_reserve_tag();
        break;
      case CommitOp:
        flags = MemPointerRecord::virtual_memory_commit_tag();
        break;
      case TypeOp:
        flags |= MemPointerRecord::virtual_memory_type_tag();
        break;
      case UncommitOp:
        assert(pre_reserved_seq, "Need pre-reserved sequence number");
        flags = MemPointerRecord::virtual_memory_uncommit_tag();
        break;
      case ReleaseOp:
        assert(pre_reserved_seq, "Need pre-reserved sequence number");
        flags = MemPointerRecord::virtual_memory_release_tag();
        break;
      case ArenaSizeOp:
        flags = MemPointerRecord::arena_size_tag();
        addr += sizeof(void*);
        break;
      case StackReleaseOp:
        flags = MemPointerRecord::virtual_memory_release_tag();
        break;
      default:
        ShouldNotReachHere();
    }

    // write memory tracking record
    if (_need_thread_critical_lock) {
      ThreadCritical tc;
      if (_seq == 0) _seq = SequenceGenerator::next();
      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
      if (_op == ReserveAndCommitOp) {
        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
          size, SequenceGenerator::next(), pc, _java_thread);
      }
      if (pre_reserved_seq) MemTracker::dec_pending_op_count();
    } else {
      if (_seq == 0) _seq = SequenceGenerator::next();
      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
      if (_op == ReserveAndCommitOp) {
        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
          size, SequenceGenerator::next(), pc, _java_thread);
      }
    }
#ifdef ASSERT
    // to prevent incorrectly reusing this op
    _seq = 0;
#endif
  }
}
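
// A hypothetical call site, following the pattern documented above (the
// names, flags and wiring here are illustrative, not taken from this file):
//
//   NMTTrackOp op(UncommitOp);            // pre-reserves a sequence number
//   if (os::uncommit_memory(addr, size)) {
//     op.execute_op((address)addr, size, mtNone, CALLER_PC);
//   } else {
//     op.abort_op();                      // releases the pending op count
//   }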