--- before
+++ after

 void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
   trace_heap(GCWhen::BeforeGC, gc_tracer);
 }

 void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
   trace_heap(GCWhen::AfterGC, gc_tracer);
 }

 // WhiteBox API support for concurrent collectors. These are the
 // default implementations, for collectors which don't support this
 // feature.
 bool CollectedHeap::supports_concurrent_phase_control() const {
   return false;
 }

 bool CollectedHeap::request_concurrent_phase(const char* phase) {
   return false;
 }

+bool CollectedHeap::is_oop_location(void* addr) const {
+  if (!is_object_aligned(addr)) {
+    return false;
+  }
+
+  if (!_reserved.contains(addr)) {
+    return false;
+  }
+
+  return true;
+}
+
 bool CollectedHeap::is_oop(oop object) const {
   if (!is_object_aligned(object)) {
     return false;
   }

   if (!is_in(object)) {
     return false;
   }

   if (is_in(object->klass_or_null())) {
     return false;
   }

   return true;
 }

 // Memory state functions.


 CollectedHeap::CollectedHeap() :

 [... unchanged lines omitted by the diff view ...]
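The new is_oop_location() turns the location check into a query rather than a pair of asserts, so the caller decides how to react to a bad address; fill_args_check() further down uses it inside its own asserts. A minimal sketch of such a caller, assuming only the interface shown above (the helper itself is hypothetical, not part of this change):

static bool verify_object_start(CollectedHeap* heap, HeapWord* addr) {
  // Query the heap, then choose the failure policy: debug builds assert with a
  // caller-specific message, while verification code could report and continue.
  bool ok = heap->is_oop_location(addr);
  assert(ok, "not a valid object start");
  return ok;
}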
     }
   } while (true);  // Until a GC is done
 }

 MemoryUsage CollectedHeap::memory_usage() {
   return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
 }


 #ifndef PRODUCT
 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
     for (size_t slot = 0; slot < size; slot += 1) {
       assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
              "Found non badHeapWordValue in pre-allocation check");
     }
   }
 }
 #endif // PRODUCT

-void CollectedHeap::check_oop_location(void* addr) const {
-  assert(is_object_aligned(addr), "address is not aligned");
-  assert(_reserved.contains(addr), "address is not in reserved heap");
-}
-
 size_t CollectedHeap::max_tlab_size() const {
   // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
   // This restriction could be removed by enabling filling with multiple arrays.
   // If we compute that the reasonable way as
   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
   // we'll overflow on the multiply, so we do the divide first.
   // We actually lose a little by dividing first,
   // but that just makes the TLAB somewhat smaller than the biggest array,
   // which is fine, since we'll be able to fill that.
   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
               sizeof(jint) *
               ((juint) max_jint / (size_t) HeapWordSize);
   return align_down(max_int_size, MinObjAlignment);
 }
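A quick worked illustration of the overflow that comment is avoiding. The numbers below assume a 32-bit VM, where size_t is 32 bits, HeapWordSize is 4 and sizeof(jint) is 4; the constants and the program are illustrative only, not HotSpot code:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t max_jint       = 0x7fffffffu;  // 2^31 - 1 array elements
  const uint32_t heap_word_size = 4;            // bytes per heap word on a 32-bit VM
  const uint32_t jint_size      = 4;            // bytes per jint

  // Multiply first: 4 * (2^31 - 1) = 2^33 - 4 bytes, which wraps modulo 2^32,
  // so the subsequent division yields 2^30 - 1 words -- half the intended size.
  uint32_t multiply_first = (jint_size * max_jint) / heap_word_size;

  // Divide first, as max_tlab_size() does: (2^31 - 1) / 4 = 2^29 - 1, times 4 is
  // 2^31 - 4 words -- only three words short of exact, the small loss the comment accepts.
  uint32_t divide_first = jint_size * (max_jint / heap_word_size);

  printf("multiply first: %u words (wrapped, wrong)\n", multiply_first);  // 1073741823
  printf("divide first:   %u words (nearly exact)\n", divide_first);      // 2147483644
  return 0;
}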
 size_t CollectedHeap::filler_array_hdr_size() {
   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
 }

 size_t CollectedHeap::filler_array_min_size() {
   return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
 }

 #ifdef ASSERT
 void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
 {
   assert(words >= min_fill_size(), "too small to fill");
   assert(is_object_aligned(words), "unaligned size");
-  DEBUG_ONLY(Universe::heap()->check_oop_location(start);)
-  DEBUG_ONLY(Universe::heap()->check_oop_location(start + words - MinObjAlignment);)
+  assert(Universe::heap()->is_oop_location(start), "invalid address");
+  assert(Universe::heap()->is_oop_location(start + words - MinObjAlignment), "invalid address");
 }

 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
 {
   if (ZapFillerObjects && zap) {
     Copy::fill_to_words(start + filler_array_hdr_size(),
                         words - filler_array_hdr_size(), 0XDEAFBABE);
   }
 }
 #endif // ASSERT

 void
 CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
 {
   assert(words >= filler_array_min_size(), "too small for an array");
   assert(words <= filler_array_max_size(), "too big for a single object");

   const size_t payload_size = words - filler_array_hdr_size();
   const size_t len = payload_size * HeapWordSize / sizeof(jint);
   assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
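For concreteness, the word-to-element conversion in fill_with_array() works out as follows; the numbers assume a 64-bit VM (8-byte heap words) and a 2-word int-array header, both of which are illustrative assumptions rather than values taken from this change:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t heap_word_size = 8;     // assumption: 64-bit VM, 8-byte heap words
  const size_t hdr_words      = 2;     // assumption: int-array header of 2 heap words
  const size_t words          = 1000;  // gap to fill, in heap words

  size_t payload_size = words - hdr_words;                        // 998 payload words
  size_t len = payload_size * heap_word_size / sizeof(int32_t);   // 998 * 8 / 4 = 1996 ints

  // fill_with_array() asserts (int)len >= 0: filler_array_max_size() keeps `words`
  // small enough that the element count still fits in a non-negative jint.
  printf("int[%zu] filler covers %zu heap words\n", len, words);
  return 0;
}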