109 char* heap_address;
110 size_t total_reserved = 0;
111 int n_covered_regions = 0;
112 ReservedSpace heap_rs;
113
114 size_t heap_alignment = collector_policy()->heap_alignment();
115
116 heap_address = allocate(heap_alignment, &total_reserved,
117 &n_covered_regions, &heap_rs);
118
119 if (!heap_rs.is_reserved()) {
120 vm_shutdown_during_initialization(
121 "Could not reserve enough space for object heap");
122 return JNI_ENOMEM;
123 }
124
125 _reserved = MemRegion((HeapWord*)heap_rs.base(),
126 (HeapWord*)(heap_rs.base() + heap_rs.size()));
127
128 // It is important to do this in a way such that concurrent readers can't
129 // temporarily think something is in the heap. (Seen this happen in asserts.)
130 _reserved.set_word_size(0);
131 _reserved.set_start((HeapWord*)heap_rs.base());
132 size_t actual_heap_size = heap_rs.size();
133 _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
134
135 _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
136 set_barrier_set(rem_set()->bs());
137
138 _gch = this;
139
140 for (i = 0; i < _n_gens; i++) {
141 ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
142 _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
143 heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
144 }
145 clear_incremental_collection_failed();
146
147 #if INCLUDE_ALL_GCS
148 // If we are running CMS, create the collector responsible
149 // for collecting the CMS generations.
1246 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1247 }
1248 return oop(result);
1249 }
1250
1251 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1252 jlong _time; // in ms
1253 jlong _now; // in ms
1254
1255 public:
1256 GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1257
1258 jlong time() { return _time; }
1259
1260 void do_generation(Generation* gen) {
1261 _time = MIN2(_time, gen->time_of_last_gc(_now));
1262 }
1263 };
1264
1265 jlong GenCollectedHeap::millis_since_last_gc() {
1266 // We need a monotonically non-deccreasing time in ms but
1267 // os::javaTimeMillis() does not guarantee monotonicity.
1268 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1269 GenTimeOfLastGCClosure tolgc_cl(now);
1270 // iterate over generations getting the oldest
1271 // time that a generation was collected
1272 generation_iterate(&tolgc_cl, false);
1273
1274 // javaTimeNanos() is guaranteed to be monotonically non-decreasing
1275 // provided the underlying platform provides such a time source
1276 // (and it is bug free). So we still have to guard against getting
1277 // back a time later than 'now'.
1278 jlong retVal = now - tolgc_cl.time();
1279 if (retVal < 0) {
1280 NOT_PRODUCT(warning("time warp: "INT64_FORMAT, retVal);)
1281 return 0;
1282 }
1283 return retVal;
1284 }
|
109 char* heap_address;
110 size_t total_reserved = 0;
111 int n_covered_regions = 0;
112 ReservedSpace heap_rs;
113
114 size_t heap_alignment = collector_policy()->heap_alignment();
115
116 heap_address = allocate(heap_alignment, &total_reserved,
117 &n_covered_regions, &heap_rs);
118
119 if (!heap_rs.is_reserved()) {
120 vm_shutdown_during_initialization(
121 "Could not reserve enough space for object heap");
122 return JNI_ENOMEM;
123 }
124
125 _reserved = MemRegion((HeapWord*)heap_rs.base(),
126 (HeapWord*)(heap_rs.base() + heap_rs.size()));
127
128 // It is important to do this in a way such that concurrent readers can't
129 // temporarily think something is in the heap. (Seen this happen in asserts.)
130 _reserved.set_word_size(0);
131 _reserved.set_start((HeapWord*)heap_rs.base());
132 size_t actual_heap_size = heap_rs.size();
133 _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
134
135 _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
136 set_barrier_set(rem_set()->bs());
137
138 _gch = this;
139
140 for (i = 0; i < _n_gens; i++) {
141 ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
142 _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
143 heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
144 }
145 clear_incremental_collection_failed();
146
147 #if INCLUDE_ALL_GCS
148 // If we are running CMS, create the collector responsible
149 // for collecting the CMS generations.
1246 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1247 }
1248 return oop(result);
1249 }
1250
1251 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1252 jlong _time; // in ms
1253 jlong _now; // in ms
1254
1255 public:
1256 GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1257
1258 jlong time() { return _time; }
1259
1260 void do_generation(Generation* gen) {
1261 _time = MIN2(_time, gen->time_of_last_gc(_now));
1262 }
1263 };
1264
1265 jlong GenCollectedHeap::millis_since_last_gc() {
1266 // We need a monotonically non-decreasing time in ms but
1267 // os::javaTimeMillis() does not guarantee monotonicity.
1268 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1269 GenTimeOfLastGCClosure tolgc_cl(now);
1270 // iterate over generations getting the oldest
1271 // time that a generation was collected
1272 generation_iterate(&tolgc_cl, false);
1273
1274 // javaTimeNanos() is guaranteed to be monotonically non-decreasing
1275 // provided the underlying platform provides such a time source
1276 // (and it is bug free). So we still have to guard against getting
1277 // back a time later than 'now'.
1278 jlong retVal = now - tolgc_cl.time();
1279 if (retVal < 0) {
1280 NOT_PRODUCT(warning("time warp: "INT64_FORMAT, retVal);)
1281 return 0;
1282 }
1283 return retVal;
1284 }
|