51 }
52 _thread_count ++;
53 }
54
55
// Static state of the native memory tracker (NMT).
MemRecorder* volatile MemTracker::_global_recorder = NULL;   // shared recorder; written only under ThreadCritical
MemSnapshot* MemTracker::_snapshot = NULL;                   // merged memory snapshot
MemBaseline MemTracker::_baseline;                           // baseline the snapshot is compared against
Mutex* MemTracker::_query_lock = NULL;                       // serializes snapshot queries
// lock-free stacks: recorders waiting to be merged, and recycled recorders
MemRecorder* volatile MemTracker::_merge_pending_queue = NULL;
MemRecorder* volatile MemTracker::_pooled_recorders = NULL;
MemTrackWorker* MemTracker::_worker_thread = NULL;           // background thread that merges recorders
int MemTracker::_sync_point_skip_count = 0;                  // consecutive safepoints that skipped the NMT sync point
MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off;
volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited;
MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none;
// thread count taken at the last sync point; the initial value bounds recorder
// pooling (see release_thread_recorder) until a real count is available
int MemTracker::_thread_count = 255;
volatile jint MemTracker::_pooled_recorder_count = 0;
// presumably the generation currently processed by the worker thread — TODO confirm
volatile unsigned long MemTracker::_processing_generation = 0;
volatile bool MemTracker::_worker_thread_idle = false;
// when set, calling threads yield so the worker thread can catch up
volatile bool MemTracker::_slowdown_calling_thread = false;
debug_only(intx MemTracker::_main_thread_tid = 0;)
NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;)
74
75 void MemTracker::init_tracking_options(const char* option_line) {
76 _tracking_level = NMT_off;
77 if (strcmp(option_line, "=summary") == 0) {
78 _tracking_level = NMT_summary;
79 } else if (strcmp(option_line, "=detail") == 0) {
80 _tracking_level = NMT_detail;
81 } else if (strcmp(option_line, "=off") != 0) {
82 vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
83 }
84 }
85
86 // first phase of bootstrapping, when VM is still in single-threaded mode.
87 void MemTracker::bootstrap_single_thread() {
88 if (_tracking_level > NMT_off) {
89 assert(_state == NMT_uninited, "wrong state");
90
// Return a per-thread recorder to the shared pool, or delete it when NMT is
// shutting down or the pool already holds enough recorders. The pool is a
// lock-free stack updated with compare-and-swap.
void MemTracker::release_thread_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");
  // we don't want to pool too many recorders
  rec->set_next(NULL);
  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
    delete rec;
    return;
  }

  // wipe old records before recycling, then push onto the pooled stack
  rec->clear();
  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  rec->set_next(cur_head);
  // CAS push loop: on a lost race, re-read the head and retry
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
    rec->set_next(cur_head);
  }
  // count is only loosely coupled to the stack; it gates the pooling check above
  Atomic::inc(&_pooled_recorder_count);
}
330
331 /*
332 * This is the most important method in whole nmt implementation.
333 *
334 * Create a memory record.
335 * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM
336 * still in single thread mode.
337 * 2. For all threads other than JavaThread, ThreadCritical is needed
338 * to write to recorders to global recorder.
339 *    3. For JavaThreads that are no longer visible to safepoints, also
340 * need to take ThreadCritical and records are written to global
341 * recorders, since these threads are NOT walked by Threads.do_thread().
342 * 4. JavaThreads that are running in native state, have to transition
343 * to VM state before writing to per-thread recorders.
344 * 5. JavaThreads that are running in VM state do not need any lock and
345 * records are written to per-thread recorders.
346 *    6. For a thread that has yet to attach a VM 'Thread', it needs to take
347 * ThreadCritical to write to global recorder.
348 *
349 * Important note:
350 * NO LOCK should be taken inside ThreadCritical lock !!!
351 */
// Create a memory record for the block at 'addr'. The locking rules are laid
// out in the comment above: the choice of lock (none vs. ThreadCritical) and
// target recorder (per-thread vs. global) depends on the caller's thread type
// and state.
void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
    size_t size, address pc, Thread* thread) {
  assert(addr != NULL, "Sanity check");
  if (!shutdown_in_progress()) {
    // single thread, we just write records direct to global recorder,
    // without any lock
    if (_state == NMT_bootstrapping_single_thread) {
      assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
      thread = NULL;
    } else {
      if (thread == NULL) {
        // don't use Thread::current(), since it is possible that
        // the calling thread has yet to attach to VM 'Thread',
        // which will result assertion failure
        thread = ThreadLocalStorage::thread();
      }
    }

    if (thread != NULL) {
      // slow down all calling threads except NMT worker thread, so it
      // can catch up.
      if (_slowdown_calling_thread && thread != _worker_thread) {
        os::yield_all();
      }

      if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
        JavaThread* java_thread = (JavaThread*)thread;
        JavaThreadState state = java_thread->thread_state();
        if (SafepointSynchronize::safepoint_safe(java_thread, state)) {
          // JavaThreads that are safepoint safe, can run through safepoint,
          // so ThreadCritical is needed to ensure no threads at safepoint create
          // new records while the records are being gathered and the sequence number is changing
          ThreadCritical tc;
          create_record_in_recorder(addr, flags, size, pc, java_thread);
        } else {
          // safepoint-visible JavaThread in a non-safepoint-safe state will
          // block at the sync point, so its per-thread recorder needs no lock
          create_record_in_recorder(addr, flags, size, pc, java_thread);
        }
      } else {
        // other threads, such as worker and watcher threads, etc. need to
        // take ThreadCritical to write to global recorder
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    } else {
      if (_state == NMT_bootstrapping_single_thread) {
        // single thread, no lock needed
        create_record_in_recorder(addr, flags, size, pc, NULL);
      } else {
        // a thread that has yet to attach a VM 'Thread' can not use VM mutexes;
        // use native thread critical instead
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    }
  }
}
408
409 // write a record to proper recorder. No lock can be taken from this method
410 // down.
411 void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags,
412 size_t size, address pc, JavaThread* thread) {
413
414 MemRecorder* rc = get_thread_recorder(thread);
415 if (rc != NULL) {
416 rc->record(addr, flags, size, pc);
417 }
418 }
419
420 /**
421 * enqueue a recorder to pending queue
422 */
423 void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
424 assert(rec != NULL, "null recorder");
425
426 // we are shutting down, so just delete it
427 if (shutdown_in_progress()) {
428 rec->set_next(NULL);
429 delete rec;
430 return;
431 }
432
433 MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
434 rec->set_next(cur_head);
435 while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
436 (void*)cur_head)) {
461 // Some GC tests hit large number of safepoints in short period of time
462 // without meaningful activities. We should prevent going to
463 // sync point in these cases, which can potentially exhaust generation buffer.
464 // Here are the factors that determine if we should go into sync point:
465 // 1. not to overflow sequence number
466 // 2. if we are in danger to overflow generation buffer
467 // 3. how many safepoints we already skipped sync point
468 if (_state == NMT_started) {
469 // worker thread is not ready, no one can manage generation
470 // buffer, so skip this safepoint
471 if (_worker_thread == NULL) return;
472
473 if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
474 int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
475 int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
476 if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
477 _sync_point_skip_count ++;
478 return;
479 }
480 }
481 _sync_point_skip_count = 0;
482 {
483 // This method is running at safepoint, with ThreadCritical lock,
484 // it should guarantee that NMT is fully sync-ed.
485 ThreadCritical tc;
486
487 SequenceGenerator::reset();
488
489 // walk all JavaThreads to collect recorders
490 SyncThreadRecorderClosure stc;
491 Threads::threads_do(&stc);
492
493 _thread_count = stc.get_thread_count();
494 MemRecorder* pending_recorders = get_pending_recorders();
495
496 if (_global_recorder != NULL) {
497 _global_recorder->set_next(pending_recorders);
498 pending_recorders = _global_recorder;
499 _global_recorder = NULL;
500 }
501
502 // see if NMT has too many outstanding recorder instances, it usually
503 // means that worker thread is lagging behind in processing them.
504 if (!AutoShutdownNMT) {
505 _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
506 }
507
508 // check _worker_thread with lock to avoid racing condition
509 if (_worker_thread != NULL) {
510 _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
511 }
512
513 assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
514 }
515 }
516
517 // now, it is the time to shut whole things off
518 if (_state == NMT_final_shutdown) {
519 // walk all JavaThreads to delete all recorders
520 SyncThreadRecorderClosure stc;
521 Threads::threads_do(&stc);
522 // delete global recorder
523 {
524 ThreadCritical tc;
525 if (_global_recorder != NULL) {
526 delete _global_recorder;
527 _global_recorder = NULL;
528 }
529 }
530 MemRecorder* pending_recorders = get_pending_recorders();
531 if (pending_recorders != NULL) {
532 delete pending_recorders;
533 }
682 st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
683 st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
684 if (_worker_thread != NULL) {
685 st->print_cr("\tWorker thread:");
686 st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
687 st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
688 st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
689 } else {
690 st->print_cr("\tWorker thread is not started");
691 }
692 st->print_cr(" ");
693
694 if (_snapshot != NULL) {
695 _snapshot->print_snapshot_stats(st);
696 } else {
697 st->print_cr("No snapshot");
698 }
699 }
700 #endif
701
|
51 }
52 _thread_count ++;
53 }
54
55
// Static state of the native memory tracker (NMT).
MemRecorder* volatile MemTracker::_global_recorder = NULL;   // shared recorder; written only under ThreadCritical
MemSnapshot* MemTracker::_snapshot = NULL;                   // merged memory snapshot
MemBaseline MemTracker::_baseline;                           // baseline the snapshot is compared against
Mutex* MemTracker::_query_lock = NULL;                       // serializes snapshot queries
// lock-free stacks: recorders waiting to be merged, and recycled recorders
MemRecorder* volatile MemTracker::_merge_pending_queue = NULL;
MemRecorder* volatile MemTracker::_pooled_recorders = NULL;
MemTrackWorker* MemTracker::_worker_thread = NULL;           // background thread that merges recorders
int MemTracker::_sync_point_skip_count = 0;                  // consecutive safepoints that skipped the NMT sync point
MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off;
volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited;
MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none;
// thread count taken at the last sync point; the initial value bounds recorder
// pooling (see release_thread_recorder) until a real count is available
int MemTracker::_thread_count = 255;
volatile jint MemTracker::_pooled_recorder_count = 0;
// presumably the generation currently processed by the worker thread — TODO confirm
volatile unsigned long MemTracker::_processing_generation = 0;
volatile bool MemTracker::_worker_thread_idle = false;
// number of tracking ops holding pre-reserved sequence numbers; the NMT
// sync point is skipped while this is > 0 (see NMTTrackOp)
volatile jint MemTracker::_pending_op_count = 0;
// when set, calling threads yield so the worker thread can catch up
volatile bool MemTracker::_slowdown_calling_thread = false;
debug_only(intx MemTracker::_main_thread_tid = 0;)
NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;)
75
76 void MemTracker::init_tracking_options(const char* option_line) {
77 _tracking_level = NMT_off;
78 if (strcmp(option_line, "=summary") == 0) {
79 _tracking_level = NMT_summary;
80 } else if (strcmp(option_line, "=detail") == 0) {
81 _tracking_level = NMT_detail;
82 } else if (strcmp(option_line, "=off") != 0) {
83 vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
84 }
85 }
86
87 // first phase of bootstrapping, when VM is still in single-threaded mode.
88 void MemTracker::bootstrap_single_thread() {
89 if (_tracking_level > NMT_off) {
90 assert(_state == NMT_uninited, "wrong state");
91
// Return a per-thread recorder to the shared pool, or delete it when NMT is
// shutting down or the pool already holds enough recorders. The pool is a
// lock-free stack updated with compare-and-swap.
void MemTracker::release_thread_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");
  // we don't want to pool too many recorders
  rec->set_next(NULL);
  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
    delete rec;
    return;
  }

  // wipe old records before recycling, then push onto the pooled stack
  rec->clear();
  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  rec->set_next(cur_head);
  // CAS push loop: on a lost race, re-read the head and retry
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
    rec->set_next(cur_head);
  }
  // count is only loosely coupled to the stack; it gates the pooling check above
  Atomic::inc(&_pooled_recorder_count);
}
331
332 // write a record to proper recorder. No lock can be taken from this method
333 // down.
334 void MemTracker::write_tracking_record(address addr, MEMFLAGS flags,
335 size_t size, jint seq, address pc, JavaThread* thread) {
336
337 MemRecorder* rc = get_thread_recorder(thread);
338 if (rc != NULL) {
339 rc->record(addr, flags, size, seq, pc);
340 }
341 }
342
343 /**
344 * enqueue a recorder to pending queue
345 */
346 void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
347 assert(rec != NULL, "null recorder");
348
349 // we are shutting down, so just delete it
350 if (shutdown_in_progress()) {
351 rec->set_next(NULL);
352 delete rec;
353 return;
354 }
355
356 MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
357 rec->set_next(cur_head);
358 while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
359 (void*)cur_head)) {
384 // Some GC tests hit large number of safepoints in short period of time
385 // without meaningful activities. We should prevent going to
386 // sync point in these cases, which can potentially exhaust generation buffer.
387 // Here are the factors that determine if we should go into sync point:
388 // 1. not to overflow sequence number
389 // 2. if we are in danger to overflow generation buffer
390 // 3. how many safepoints we already skipped sync point
391 if (_state == NMT_started) {
392 // worker thread is not ready, no one can manage generation
393 // buffer, so skip this safepoint
394 if (_worker_thread == NULL) return;
395
396 if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
397 int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
398 int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
399 if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
400 _sync_point_skip_count ++;
401 return;
402 }
403 }
404 {
405 // This method is running at safepoint, with ThreadCritical lock,
406 // it should guarantee that NMT is fully sync-ed.
407 ThreadCritical tc;
408
409 // We can NOT execute NMT sync-point if there are pending tracking ops.
410 if (_pending_op_count == 0) {
411 SequenceGenerator::reset();
412 _sync_point_skip_count = 0;
413
414 // walk all JavaThreads to collect recorders
415 SyncThreadRecorderClosure stc;
416 Threads::threads_do(&stc);
417
418 _thread_count = stc.get_thread_count();
419 MemRecorder* pending_recorders = get_pending_recorders();
420
421 if (_global_recorder != NULL) {
422 _global_recorder->set_next(pending_recorders);
423 pending_recorders = _global_recorder;
424 _global_recorder = NULL;
425 }
426
427 // see if NMT has too many outstanding recorder instances, it usually
428 // means that worker thread is lagging behind in processing them.
429 if (!AutoShutdownNMT) {
430 _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
431 }
432
433 // check _worker_thread with lock to avoid racing condition
434 if (_worker_thread != NULL) {
435 _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
436 }
437
438 assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
439 } else {
440 _sync_point_skip_count ++;
441 }
442 }
443 }
444
445 // now, it is the time to shut whole things off
446 if (_state == NMT_final_shutdown) {
447 // walk all JavaThreads to delete all recorders
448 SyncThreadRecorderClosure stc;
449 Threads::threads_do(&stc);
450 // delete global recorder
451 {
452 ThreadCritical tc;
453 if (_global_recorder != NULL) {
454 delete _global_recorder;
455 _global_recorder = NULL;
456 }
457 }
458 MemRecorder* pending_recorders = get_pending_recorders();
459 if (pending_recorders != NULL) {
460 delete pending_recorders;
461 }
610 st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
611 st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
612 if (_worker_thread != NULL) {
613 st->print_cr("\tWorker thread:");
614 st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
615 st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
616 st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
617 } else {
618 st->print_cr("\tWorker thread is not started");
619 }
620 st->print_cr(" ");
621
622 if (_snapshot != NULL) {
623 _snapshot->print_snapshot_stats(st);
624 } else {
625 st->print_cr("No snapshot");
626 }
627 }
628 #endif
629
630
631 // NMTTrackOp Implementation
632
633 /*
634 * Create a NMT tracking OP.
635 * This is a fairly complicated constructor, as it has to make two important decisions:
636 * 1) Does it need to take ThreadCritical lock to write tracking record
637 * 2) Does it need to pre-reserve a sequence number for the tracking record
638 *
639 * The rules to determine if ThreadCritical is needed:
640 * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM
641 * still in single thread mode.
642 * 2. For all threads other than JavaThread, ThreadCritical is needed
643 * to write to recorders to global recorder.
644 *    3. For JavaThreads that are no longer visible to safepoints, also
645 * need to take ThreadCritical and records are written to global
646 * recorders, since these threads are NOT walked by Threads.do_thread().
647 * 4. JavaThreads that are running in safepoint-safe states do not stop
648 *      for safepoints, ThreadCritical lock should be taken to write
649 * memory records.
650 * 5. JavaThreads that are running in VM state do not need any lock and
651 * records are written to per-thread recorders.
652 *    6. For a thread that has yet to attach a VM 'Thread', it needs to take
653 * ThreadCritical to write to global recorder.
654 *
655 * The memory operations that need pre-reserve sequence numbers:
656 * The memory operations that "release" memory blocks and the
657 * operations can fail, need to pre-reserve sequence number. They
658 * are realloc, uncommit and release.
659 *
660 * The reason for pre-reserve sequence number, is to prevent race condition:
661 * Thread 1 Thread 2
662 * <release>
663 * <allocate>
664 * <write allocate record>
665 * <write release record>
666 * if Thread 2 happens to obtain the memory address Thread 1 just released,
667 * then NMT can mistakenly report the memory is free.
668 *
669 * Noticeably, free() does not need pre-reserve sequence number, because the call
670 * does not fail, so we can always write the "release" record before the memory is actually
671 * freed.
672 *
673 * For realloc, uncommit and release, following coding pattern should be used:
674 *
675 * NMTTrackOp op(ReallocOp); NMTTrackOp op(UncommitOp); NMTTrackOp op(ReleaseOp);
676 * ptr = ::realloc(...); if (!uncommit(...)) { if (!release(...)) {
677 * if (ptr == NULL) { op.abort_op(); op.abort_op();
678 * op.abort_op(); } else { } else
679 * } else { op.execute_op(....) op.execute_op(....);
680 * op.execute_op(...) } }
681 * }
682 *
683 * Since pre-reserved sequence number is only good for the generation that it is acquired,
684 * when there is pending NMTTrackOp that reserved sequence number, NMT sync-point has
685 * to be skipped to prevent advancing generation. This is done by inc and dec
686 * MemTracker::_pending_op_count, when MemTracker::_pending_op_count > 0, NMT sync-point is skipped.
687 * Not all pre-reservation of sequence number will increment pending op count. For JavaThreads
688 * that honor safepoints, safepoint can not occur during the memory operations, so the
689 * pre-reserved sequence number won't cross the generation boundary.
690 */
// Construct a tracking op (see the design comment above). Decides two things:
// whether writing the record(s) later requires ThreadCritical, and whether a
// sequence number must be pre-reserved now (realloc/uncommit/release only).
NMTTrackOp::NMTTrackOp(NMTMemoryOps op, Thread* thr) {
  _op = NoOp;
  _seq = 0;
  if (MemTracker::is_on()) {
    _java_thread = NULL;
    _op = op;

    // figure out if ThreadCritical lock is needed to write this operation
    // to MemTracker
    if (MemTracker::is_single_threaded_bootstrap()) {
      thr = NULL;
    } else if (thr == NULL) {
      // don't use Thread::current(), since it is possible that
      // the calling thread has yet to attach to VM 'Thread',
      // which will result assertion failure
      thr = ThreadLocalStorage::thread();
    }

    if (thr != NULL) {
      // Check NMT load
      MemTracker::check_NMT_load(thr);

      if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) {
        _java_thread = (JavaThread*)thr;
        JavaThreadState state = _java_thread->thread_state();
        // JavaThreads that are safepoint safe, can run through safepoint,
        // so ThreadCritical is needed to ensure no threads at safepoint create
        // new records while the records are being gathered and the sequence number is changing
        _need_thread_critical_lock =
          SafepointSynchronize::safepoint_safe(_java_thread, state);
      } else {
        // non-Java thread, or JavaThread invisible to safepoints: must write
        // to the global recorder under ThreadCritical
        _need_thread_critical_lock = true;
      }
    } else {
      // no attached Thread: lock unless we are still single-threaded
      _need_thread_critical_lock
        = !MemTracker::is_single_threaded_bootstrap();
    }

    // see if we need to pre-reserve sequence number for this operation
    switch(_op) {
      case MallocOp:
      case FreeOp:
      case ReserveOp:
      case CommitOp:
      case ReserveAndCommitOp:
      case TypeOp:
      case ArenaSizeOp:
      case StackReleaseOp:
        // we don't need to pre-reserve sequence number
        // for above ops
        _seq = 0;
        break;
      case ReallocOp:
      case UncommitOp:
      case ReleaseOp: {
        if (_need_thread_critical_lock) {
          ThreadCritical tc;
          // pre-reserve under ThreadCritical and bump the pending-op count:
          // the sync point is skipped while _pending_op_count > 0, keeping the
          // reserved number valid until execute_op()/abort_op()
          MemTracker::inc_pending_op_count();
          _seq = SequenceGenerator::next();
        } else {
          // a safepoint-honoring JavaThread cannot hit a safepoint (and hence
          // a generation change) during the memory operation, so no
          // pending-op accounting is needed
          _seq = SequenceGenerator::next();
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
}
759
760 void NMTTrackOp::abort_op() {
761 if (MemTracker::is_on() && _seq != 0 && _need_thread_critical_lock) {
762 ThreadCritical tc;
763 MemTracker::dec_pending_op_count();
764 }
765 }
766
767
// Write the two records for a successful realloc: a free of the old address
// (using the pre-reserved sequence number) followed by a malloc of the new
// address. Valid only for ReallocOp (NoOp means tracking was off at
// construction and this call does nothing).
void NMTTrackOp::execute_op(address old_addr, address new_addr, size_t size,
    MEMFLAGS flags, address pc) {
  assert(old_addr != NULL && new_addr != NULL, "Sanity check");
  assert(_op == ReallocOp || _op == NoOp, "Wrong call");
  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
    assert(_seq > 0, "Need pre-reserve sequence number");
    if (_need_thread_critical_lock) {
      ThreadCritical tc;
      // free old address, use pre-reserved sequence number
      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
        0, _seq, pc, _java_thread);
      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
        size, SequenceGenerator::next(), pc, _java_thread);
      // decrement MemTracker pending_op_count
      MemTracker::dec_pending_op_count();
    } else {
      // same two records; this thread honors safepoints, so neither the lock
      // nor pending-op accounting is needed (see constructor)
      // free old address, use pre-reserved sequence number
      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
        0, _seq, pc, _java_thread);
      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
        size, SequenceGenerator::next(), pc, _java_thread);
    }
  }
}
792
793 void NMTTrackOp::execute_op(address addr, size_t size, MEMFLAGS flags, address pc) {
794 assert(addr != NULL, "Sanity check");
795 if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
796 bool pre_reserved_seq = (_seq != 0);
797 address pc = CALLER_CALLER_PC;
798 MEMFLAGS orig_flags = flags;
799
800 switch(_op) {
801 case MallocOp:
802 flags |= MemPointerRecord::malloc_tag();
803 break;
804 case FreeOp:
805 flags = MemPointerRecord::free_tag();
806 break;
807 case ReallocOp:
808 fatal("Usae the other NMTTrackOp::execute_op()");
809 break;
810 case ReserveOp:
811 case ReserveAndCommitOp:
812 flags |= MemPointerRecord::virtual_memory_reserve_tag();
813 break;
814 case CommitOp:
815 flags = MemPointerRecord::virtual_memory_commit_tag();
816 break;
817 case TypeOp:
818 flags |= MemPointerRecord::virtual_memory_type_tag();
819 break;
820 case UncommitOp:
821 assert(pre_reserved_seq, "Need pre-reserve sequence number");
822 flags = MemPointerRecord::virtual_memory_uncommit_tag();
823 break;
824 case ReleaseOp:
825 assert(pre_reserved_seq, "Need pre-reserve sequence number");
826 flags = MemPointerRecord::virtual_memory_release_tag();
827 break;
828 case ArenaSizeOp:
829 flags = MemPointerRecord::arena_size_tag();
830 addr += sizeof(void*);
831 break;
832 case StackReleaseOp:
833 flags = MemPointerRecord::virtual_memory_release_tag();
834 break;
835 default:
836 ShouldNotReachHere();
837 }
838
839 // write memory tracking record
840 if (_need_thread_critical_lock) {
841 ThreadCritical tc;
842 if (_seq == 0) _seq = SequenceGenerator::next();
843 MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
844 if (_op == ReserveAndCommitOp) {
845 MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
846 size, SequenceGenerator::next(), pc, _java_thread);
847 }
848 if (pre_reserved_seq) MemTracker::dec_pending_op_count();
849 } else {
850 if (_seq == 0) _seq = SequenceGenerator::next();
851 MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
852 if (_op == ReserveAndCommitOp) {
853 MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
854 size, SequenceGenerator::next(), pc, _java_thread);
855 }
856 }
857 #ifdef ASSERT
858 // to prevent from incorrectly reusing this op
859 _seq = 0;
860 #endif
861 }
862 }
863
|