/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "services/memTrackWorker.hpp"
#include "utilities/decoder.hpp"
#include "utilities/vmError.hpp"

MemTrackWorker::MemTrackWorker() {
  // The created thread uses the cgc thread type for now. We should revisit
  // this choice, or introduce a new thread type.
  _has_error = !os::create_thread(this, os::cgc_thread);
  set_name("MemTrackWorker", 0);

  // initialize the generation circular buffer
  if (!has_error()) {
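    // _head indexes the generation currently being drained by the worker
    // thread; _tail indexes the generation receiving new recorders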
    _head = _tail = 0;
    for (int index = 0; index < MAX_GENERATIONS; index ++) {
      _gen[index] = NULL;
    }
  }
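  // debug-only statistics, compiled out of product builds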
  NOT_PRODUCT(_sync_point_count = 0;)
  NOT_PRODUCT(_merge_count = 0;)
  NOT_PRODUCT(_last_gen_in_use = 0;)
}

MemTrackWorker::~MemTrackWorker() {
  for (int index = 0; index < MAX_GENERATIONS; index ++) {
    MemRecorder* rc = _gen[index];
    if (rc != NULL) {
      delete rc;
    }
  }
}

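// MemTrackWorker objects must be allocated with the nothrow operator new
// below; the plain operator new is intentionally disabled.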
void* MemTrackWorker::operator new(size_t size) {
  assert(false, "use nothrow version");
  return NULL;
}

void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) {
  return allocate(size, false, mtNMT);
}

void MemTrackWorker::start() {
  os::start_thread(this);
}

/*
 * Native memory tracking worker thread loop:
 *   1. merge one generation of memory recorders into the staging area
 *   2. promote staging data to the memory snapshot
 *
 * This thread can run through safepoints.
 */

void MemTrackWorker::run() {
  assert(MemTracker::is_on(), "native memory tracking is off");
  this->initialize_thread_local_storage();
  this->record_stack_base_and_size();
  MemSnapshot* snapshot = MemTracker::get_snapshot();
  assert(snapshot != NULL, "Worker should not be started");
  MemRecorder* rec;
  unsigned long processing_generation = 0;
  bool          worker_idle = false;

  while (!MemTracker::shutdown_in_progress()) {
    NOT_PRODUCT(_last_gen_in_use = generations_in_use();)
    {
      // take a recorder from the earliest generation in the buffer
      ThreadCritical tc;
      rec = _gen[_head];
      if (rec != NULL) {
        _gen[_head] = rec->next();
      }
      assert(count_recorder(_gen[_head]) <= MemRecorder::_instance_count,
        "infinite loop after dequeue");
    }
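    // a non-NULL recorder means the current generation still has data to merge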
    if (rec != NULL) {
      if (rec->get_generation() != processing_generation || worker_idle) {
        processing_generation = rec->get_generation();
        worker_idle = false;
        MemTracker::set_current_processing_generation(processing_generation);
      }

      // merge the recorder into the staging area
      if (!snapshot->merge(rec)) {
        MemTracker::shutdown(MemTracker::NMT_out_of_memory);
      } else {
        NOT_PRODUCT(_merge_count ++;)
      }
      MemTracker::release_thread_recorder(rec);
    } else {
      // no more recorders to merge, promote the staging area
      // to the snapshot
      if (_head != _tail) {
        {
          ThreadCritical tc;
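          // re-check under ThreadCritical: if more recorders have been queued
          // into this generation, or the buffer state has changed, skip the
          // promotion and go around the loop again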
          if (_gen[_head] != NULL || _head == _tail) {
            continue;
          }
          // done with this generation, increment _head pointer
          _head = (_head + 1) % MAX_GENERATIONS;
        }
        // promote this generation data to snapshot
        if (!snapshot->promote()) {
          // failed to promote, means out of memory
          MemTracker::shutdown(MemTracker::NMT_out_of_memory);
        }
      } else {
        // worker thread is idle
        worker_idle = true;
        MemTracker::report_worker_idle();
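        // wait for new data to arrive before checking the queue again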
        snapshot->wait(1000);
        ThreadCritical tc;
        // check if more data arrived
        if (_gen[_head] == NULL) {
          _gen[_head] = MemTracker::get_pending_recorders();
        }
      }
    }
  }
  assert(MemTracker::shutdown_in_progress(), "just check");

  // transition to final shutdown
  MemTracker::final_shutdown();
}

// Called at a synchronization point, where 'safepoint visible' Java threads are
// blocked at a safepoint and the remaining threads are blocked on the
// ThreadCritical lock. The caller, MemTracker::sync(), already holds
// ThreadCritical before calling this method.
//
// The following tasks are performed:
//   1. add all recorders in the pending queue to the current generation
//   2. advance to the next generation

void MemTrackWorker::at_sync_point(MemRecorder* rec) {
  NOT_PRODUCT(_sync_point_count ++;)
  assert(count_recorder(rec) <= MemRecorder::_instance_count,
    "pending queue has infinite loop");

  bool out_of_generation_buffer = false;
  // check shutdown state inside ThreadCritical
  if (MemTracker::shutdown_in_progress()) return;
  // append the recorders to the end of the generation
  if (rec != NULL) {
    MemRecorder* cur_head = _gen[_tail];
    if (cur_head == NULL) {
      _gen[_tail] = rec;
    } else {
      while (cur_head->next() != NULL) {
        cur_head = cur_head->next();
      }
      cur_head->set_next(rec);
    }
  }
  assert(count_recorder(rec) <= MemRecorder::_instance_count,
    "after add to current generation has infinite loop");
  // we have collected all recorders for this generation. If there is data,
  // we need to increment _tail to start a new generation.
  if (_gen[_tail] != NULL || _head == _tail) {
    _tail = (_tail + 1) % MAX_GENERATIONS;
    out_of_generation_buffer = (_tail == _head);
  }

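  // if _tail has caught up with _head, all generation slots are in use and the
  // worker thread cannot keep up, so native memory tracking has to shut down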
  if (out_of_generation_buffer) {
    MemTracker::shutdown(MemTracker::NMT_out_of_generation);
  }
}

#ifndef PRODUCT
int MemTrackWorker::count_recorder(const MemRecorder* head) {
  int count = 0;
  while (head != NULL) {
    count ++;
    head = head->next();
  }
  return count;
}

int MemTrackWorker::count_pending_recorders() const {
  int count = 0;
  for (int index = 0; index < MAX_GENERATIONS; index ++) {
    MemRecorder* head = _gen[index];
    if (head != NULL) {
      count += count_recorder(head);
    }
  }
  return count;
}
#endif