
src/share/vm/prims/whitebox.cpp

rev 9277 : imported patch 8140597-forcing-initial-mark-causes-abort-mixed-collections


 305 WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
 306   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 307   oop result = JNIHandles::resolve(obj);
 308   const HeapRegion* hr = g1->heap_region_containing(result);
 309   return hr->is_humongous();
 310 WB_END
 311 
 312 WB_ENTRY(jlong, WB_G1NumMaxRegions(JNIEnv* env, jobject o))
 313   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 314   size_t nr = g1->max_regions();
 315   return (jlong)nr;
 316 WB_END
 317 
 318 WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
 319   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 320   size_t nr = g1->num_free_regions();
 321   return (jlong)nr;
 322 WB_END
 323 
 324 WB_ENTRY(jboolean, WB_G1InConcurrentMark(JNIEnv* env, jobject o))
 325   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 326   return g1->concurrent_mark()->cmThread()->during_cycle();
 327 WB_END
 328 
 329 WB_ENTRY(jboolean, WB_G1StartMarkCycle(JNIEnv* env, jobject o))
 330   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 331   if (!g1h->concurrent_mark()->cmThread()->during_cycle()) {
 332     g1h->collect(GCCause::_wb_conc_mark);
 333     return true;
 334   }
 335   return false;
 336 WB_END
 337 
 338 WB_ENTRY(jint, WB_G1RegionSize(JNIEnv* env, jobject o))
 339   return (jint)HeapRegion::GrainBytes;
 340 WB_END
 341 
 342 WB_ENTRY(jlong, WB_PSVirtualSpaceAlignment(JNIEnv* env, jobject o))
 343   ParallelScavengeHeap* ps = ParallelScavengeHeap::heap();
 344   size_t alignment = ps->gens()->virtual_spaces()->alignment();
 345   return (jlong)alignment;
 346 WB_END




 305 WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
 306   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 307   oop result = JNIHandles::resolve(obj);
 308   const HeapRegion* hr = g1->heap_region_containing(result);
 309   return hr->is_humongous();
 310 WB_END
 311 
 312 WB_ENTRY(jlong, WB_G1NumMaxRegions(JNIEnv* env, jobject o))
 313   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 314   size_t nr = g1->max_regions();
 315   return (jlong)nr;
 316 WB_END
 317 
 318 WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
 319   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 320   size_t nr = g1->num_free_regions();
 321   return (jlong)nr;
 322 WB_END
 323 
 324 WB_ENTRY(jboolean, WB_G1InConcurrentMark(JNIEnv* env, jobject o))
 325   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 326   return g1h->concurrent_mark()->cmThread()->during_cycle();
 327 WB_END
 328 
 329 WB_ENTRY(jboolean, WB_G1StartMarkCycle(JNIEnv* env, jobject o))
 330   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 331   if (!g1h->concurrent_mark()->cmThread()->during_cycle()) {
 332     g1h->collect(GCCause::_wb_conc_mark);
 333     return true;
 334   }
 335   return false;
 336 WB_END
 337 
 338 WB_ENTRY(jint, WB_G1RegionSize(JNIEnv* env, jobject o))
 339   return (jint)HeapRegion::GrainBytes;
 340 WB_END
 341 
 342 WB_ENTRY(jlong, WB_PSVirtualSpaceAlignment(JNIEnv* env, jobject o))
 343   ParallelScavengeHeap* ps = ParallelScavengeHeap::heap();
 344   size_t alignment = ps->gens()->virtual_spaces()->alignment();
 345   return (jlong)alignment;
 346 WB_END
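These WB_ENTRY functions only become reachable from Java tests once they are listed in the JNINativeMethod dispatch table defined further down in whitebox.cpp and registered against sun.hotspot.WhiteBox; that table is not part of the hunk shown above. The sketch below shows the shape of the corresponding entries, following the existing CC/JNINativeMethod pattern in whitebox.cpp. The Java-side method names and descriptors are assumptions based on the WhiteBox API of this era, not taken from this webrev.

    // Sketch only: the table layout follows the JNINativeMethod pattern used in
    // whitebox.cpp; the Java-side names and signatures below are assumptions.
    #define CC (char*)

    static JNINativeMethod methods[] = {
      // ... other WhiteBox entries ...
      {CC"g1InConcurrentMark",      CC"()Z",                   (void*)&WB_G1InConcurrentMark     },
      {CC"g1IsHumongous",           CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous          },
      {CC"g1NumMaxRegions",         CC"()J",                   (void*)&WB_G1NumMaxRegions        },
      {CC"g1NumFreeRegions",        CC"()J",                   (void*)&WB_G1NumFreeRegions       },
      {CC"g1RegionSize",            CC"()I",                   (void*)&WB_G1RegionSize           },
      {CC"g1StartConcMarkCycle",    CC"()Z",                   (void*)&WB_G1StartMarkCycle       }, // assumed Java name
      {CC"psVirtualSpaceAlignment", CC"()J",                   (void*)&WB_PSVirtualSpaceAlignment},
      // ...
    };

With entries of this shape registered, a test could presumably start a cycle via WhiteBox.getWhiteBox().g1StartConcMarkCycle() and then poll g1InConcurrentMark() to detect when the concurrent cycle has finished; WB_G1StartMarkCycle returns false when a cycle is already in progress, so callers are expected to retry rather than assume the request was accepted.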

