
src/share/vm/gc/g1/g1CollectedHeap.cpp

 322   // the first region.
 323   HeapWord* new_obj = first_hr->bottom();
 324   // This will be the new top of the new object.
 325   HeapWord* obj_top = new_obj + word_size;
 326 
 327   // First, we need to zero the header of the space that we will be
 328   // allocating. When we update top further down, some refinement
 329   // threads might try to scan the region. By zeroing the header we
 330   // ensure that any thread that will try to scan the region will
 331   // come across the zero klass word and bail out.
 332   //
 333   // NOTE: It would not have been correct to have used
 334   // CollectedHeap::fill_with_object() and make the space look like
 335   // an int array. The thread that is doing the allocation will
 336   // later update the object header to a potentially different array
 337   // type and, for a very short period of time, the klass and length
 338   // fields will be inconsistent. This could cause a refinement
 339   // thread to calculate the object size incorrectly.
 340   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 341 
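
The bail-out that the comment above relies on can be made concrete with a small standalone sketch (illustrative names only, not HotSpot code): a scanner that reads a zero klass word simply gives up and retries later, so it never computes an object size from a half-built header.

#include <atomic>
#include <cstdint>
#include <cstdio>

// Stand-in for an object header whose klass word starts out zeroed.
struct Header { std::atomic<uintptr_t> klass_word{0}; };

// A scanner only trusts the object once the klass word is non-zero;
// a zero klass word means allocation is still in progress, so bail out.
static bool try_scan(const Header& obj) {
  return obj.klass_word.load(std::memory_order_acquire) != 0;
}

int main() {
  Header h;
  std::printf("scan before publish: %d\n", try_scan(h));  // 0: bails out
  h.klass_word.store(0x1234, std::memory_order_release);  // allocator publishes
  std::printf("scan after publish:  %d\n", try_scan(h));  // 1: safe to scan
}
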
 342   // We will set up the first region as "starts humongous". This
 343   // will also update the BOT covering all the regions to reflect
 344   // that there is a single object that starts at the bottom of the
 345   // first region.
 346   first_hr->set_starts_humongous(obj_top);
 347   first_hr->set_allocation_context(context);
 348   // Then, if there are any, we will set up the "continues
 349   // humongous" regions.
 350   HeapRegion* hr = NULL;
 351   for (uint i = first + 1; i < last; ++i) {
 352     hr = region_at(i);
 353     hr->set_continues_humongous(first_hr);
 354     hr->set_allocation_context(context);
 355   }
 356 
 357   // Up to this point no concurrent thread would have been able to
 358   // do any scanning on any region in this series. All the top
 359   // fields still point to bottom, so the intersection between
 360   // [bottom,top] and [card_start,card_end] will be empty. Before we
 361   // update the top fields, we'll do a storestore to make sure that
 362   // no thread sees the update to top before the zeroing of the
 363   // object header and the BOT initialization.
 364   OrderAccess::storestore();
 365 
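
The ordering contract here can be approximated in portable C++ with a release fence standing in for OrderAccess::storestore(); this is a rough analogue, not the VM's OrderAccess implementation. The point is that the metadata stores must become visible before the published top does:

#include <atomic>
#include <cstdint>

static uintptr_t metadata = 0;              // stands in for header/BOT setup
static std::atomic<uintptr_t> top_word{0};  // stands in for HeapRegion::_top

void publish(uintptr_t new_top) {
  metadata = 42;                                        // step 1: init metadata
  std::atomic_thread_fence(std::memory_order_release);  // ~ storestore barrier
  top_word.store(new_top, std::memory_order_relaxed);   // step 2: publish top
}

int main() { publish(0x1000); }

A reader that loads the top with acquire semantics and observes the new value is then guaranteed to also observe the earlier metadata stores.
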
 366   // Now that the BOT and the object header have been initialized,
 367   // we can update top of the "starts humongous" region.
 368   first_hr->set_top(MIN2(first_hr->end(), obj_top));
 369   if (_hr_printer.is_active()) {
 370     _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->top());
 371   }
 372 
 373   // Now, we will update the top fields of the "continues humongous"
 374   // regions.
 375   hr = NULL;
 376   for (uint i = first + 1; i < last; ++i) {
 377     hr = region_at(i);
 378     if ((i + 1) == last) {
 379       // last continues humongous region
 380       assert(hr->bottom() < obj_top && obj_top <= hr->end(),
 381              "obj_top should fall within this region");
 382       hr->set_top(obj_top);
 383       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, obj_top);
 384     } else {
 385       // not last one
 386       assert(obj_top > hr->end(), "obj_top should be above this region");
 387       hr->set_top(hr->end());
 388       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
 389     }
 390   }
 391   // If we have any continues humongous regions (hr != NULL), the top of
 392   // the last one should match obj_top.
 393   assert(hr == NULL || (hr->top() == obj_top), "sanity");
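
The two branches of the loop above can be mirrored in a tiny standalone model (plain structs, not HeapRegion) that makes the resulting tops easy to see: middle regions are filled to their end, and the last region stops at obj_top.

#include <cassert>
#include <cstddef>

struct Region { size_t bottom, end, top; };

// Mirror of the loop above over the "continues humongous" regions.
void set_tops(Region* r, size_t n, size_t obj_top) {
  for (size_t i = 1; i < n; ++i) {
    if (i + 1 == n) {
      assert(r[i].bottom < obj_top && obj_top <= r[i].end);
      r[i].top = obj_top;        // last region: partially filled
    } else {
      assert(obj_top > r[i].end);
      r[i].top = r[i].end;       // middle regions: completely full
    }
  }
}

int main() {
  Region r[3] = {{0, 100, 0}, {100, 200, 0}, {200, 300, 0}};
  set_tops(r, 3, 250);  // obj_top lands inside the last region
  // result: r[1].top == 200 (full), r[2].top == 250 (partial)
}
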
 394   check_bitmaps("Humongous Region Allocation", first_hr);
 395 
 396   increase_used(word_size * HeapWordSize);
 397 
 398   for (uint i = first; i < last; ++i) {
 399     _humongous_set.add(region_at(i));
 400   }
 401 
 402   return new_obj;
 403 }
 404 
 405 // If could fit into free regions w/o expansion, try.
 406 // Otherwise, if can expand, do so.
 407 // Otherwise, if using ex regions might help, try with ex given back.
 408 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
 409   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 410 
 411   verify_region_sets_optional();
 412 
 413   uint first = G1_NO_HRM_INDEX;
 414   uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
 415 
 416   if (obj_regions == 1) {




 322   // the first region.
 323   HeapWord* new_obj = first_hr->bottom();
 324   // This will be the new top of the new object.
 325   HeapWord* obj_top = new_obj + word_size;
 326 
 327   // First, we need to zero the header of the space that we will be
 328   // allocating. When we update top further down, some refinement
 329   // threads might try to scan the region. By zeroing the header we
 330   // ensure that any thread that will try to scan the region will
 331   // come across the zero klass word and bail out.
 332   //
 333   // NOTE: It would not have been correct to have used
 334   // CollectedHeap::fill_with_object() and make the space look like
 335   // an int array. The thread that is doing the allocation will
 336   // later update the object header to a potentially different array
 337   // type and, for a very short period of time, the klass and length
 338   // fields will be inconsistent. This could cause a refinement
 339   // thread to calculate the object size incorrectly.
 340   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 341 
 342   size_t fill_size = word_size_sum - word_size;
 343   if (fill_size > 0) {
 344     fill_with_objects(obj_top, fill_size);
 345   }
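
For a concrete feel for fill_size, here is a worked example with assumed values (1 MB regions, i.e. GrainWords = 131072 with 8-byte words; the request size is hypothetical), showing how large the filler at the tail of the last region can get:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t grain_words   = 131072;  // assumed: 1 MB region, 8-byte words
  const size_t word_size     = 300000;  // hypothetical humongous request
  const size_t obj_regions   = (word_size + grain_words - 1) / grain_words;
  const size_t word_size_sum = obj_regions * grain_words;
  const size_t fill_size     = word_size_sum - word_size;
  // 3 regions reserved, 393216 total words, 93216 words of filler objects
  std::printf("regions=%zu sum=%zu fill=%zu\n",
              obj_regions, word_size_sum, fill_size);
}
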
 346 
 347   // We will set up the first region as "starts humongous". This
 348   // will also update the BOT covering all the regions to reflect
 349   // that there is a single object that starts at the bottom of the
 350   // first region.
 351   first_hr->set_starts_humongous(first_hr->bottom() + word_size_sum);
 352   first_hr->set_allocation_context(context);
 353   // Then, if there are any, we will set up the "continues
 354   // humongous" regions.
 355   HeapRegion* hr = NULL;
 356   for (uint i = first + 1; i < last; ++i) {
 357     hr = region_at(i);
 358     hr->set_continues_humongous(first_hr);
 359     hr->set_allocation_context(context);
 360   }
 361 
 362   // Up to this point no concurrent thread would have been able to
 363   // do any scanning on any region in this series. All the top
 364   // fields still point to bottom, so the intersection between
 365   // [bottom,top] and [card_start,card_end] will be empty. Before we
 366   // update the top fields, we'll do a storestore to make sure that
 367   // no thread sees the update to top before the zeroing of the
 368   // object header and the BOT initialization.
 369   OrderAccess::storestore();
 370 
 371   // Now that the BOT and the object header have been initialized,
 372   // we can update top of the "starts humongous" region.
 373   first_hr->set_top(first_hr->end());
 374   if (_hr_printer.is_active()) {
 375     _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->end());
 376   }
 377 
 378   // Now, we will update the top fields of the "continues humongous"
 379   // regions.
 380   hr = NULL;
 381   for (uint i = first + 1; i < last; ++i) {
 382     hr = region_at(i);
 383     hr->set_top(hr->end());
 384     if (_hr_printer.is_active()) {
 385       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
 386     }
 387   }
 388 
 389   assert(hr == NULL || (hr->bottom() < obj_top && obj_top <= hr->end()),
 390          "obj_top should be in last region");
 391 
 392   check_bitmaps("Humongous Region Allocation", first_hr);
 393 
 394   increase_used(word_size_sum * HeapWordSize);
 395 
 396   for (uint i = first; i < last; ++i) {
 397     _humongous_set.add(region_at(i));
 398   }
 399 
 400   return new_obj;
 401 }
 402 
 403 // If could fit into free regions w/o expansion, try.
 404 // Otherwise, if can expand, do so.
 405 // Otherwise, if using ex regions might help, try with ex given back.
 406 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
 407   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 408 
 409   verify_region_sets_optional();
 410 
 411   uint first = G1_NO_HRM_INDEX;
 412   uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
 413 
 414   if (obj_regions == 1) {
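
For reference, align_size_up_ rounds a size up to a multiple of a power-of-two alignment using the usual mask idiom; this sketch reproduces the obj_regions arithmetic with assumed values:

#include <cstddef>
#include <cstdio>

// The align-up idiom behind align_size_up_: round size up to a multiple of
// alignment, which must be a power of two.
static size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t grain_words = 131072;  // assumed region size in words
  std::printf("%zu\n", align_up(300000, grain_words) / grain_words);  // 3
  std::printf("%zu\n", align_up(131072, grain_words) / grain_words);  // 1
}
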