334 return free_chunks(index);
335 }
336
337 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
338 // split up the larger chunk into n smaller chunks, at least one of which should be
339 // the target chunk of target chunk size. The smaller chunks, including the target
340 // chunk, are returned to the freelist. The pointer to the target chunk is returned.
341 // Note that this chunk is supposed to be removed from the freelist right away.
342 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
343 assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
344
// Chunk-type index of the chunk being split, and of the smaller target chunk to carve out.
345 const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
346 const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
347
// The region being re-carved is the larger chunk's own memory: the Metachunk
// header lives at the start of the chunk, so region_start is both the address
// of the old header and the address where the new target chunk is built.
348 MetaWord* const region_start = (MetaWord*)larger_chunk;
349 const size_t region_word_len = larger_chunk->word_size();
350 MetaWord* const region_end = region_start + region_word_len;
351 VirtualSpaceNode* const vsn = larger_chunk->container();
// region_end and ocmap are not used in the visible part of this function;
// presumably the elided remainder uses them to create the leftover splinter
// chunks and update the occupancy map — TODO confirm against full source.
352 OccupancyMap* const ocmap = vsn->occupancy_map();
353
354 // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
355 // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
356 // at an address suitable to place the smaller target chunk.
357 assert_is_aligned(region_start, target_chunk_word_size);
358
359 // Remove old chunk.
360 free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
361 larger_chunk->remove_sentinel();
362
363 // Prevent access to the old chunk from here on.
364 larger_chunk = NULL;
365 // ... and wipe it.
// Debug-only poison pattern (0xfe) over the whole old chunk, header included.
366 DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
367
368 // In its place create first the target chunk...
// Placement-new constructs the new Metachunk header in place at the region start.
369 MetaWord* p = region_start;
370 Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
371 assert(target_chunk == (Metachunk*)p, "Sanity");
372 target_chunk->set_origin(origin_split);
// NOTE(review): excerpt ends here — creation of the remaining smaller chunks
// and the function's return are not visible in this fragment.
// NOTE(review): excerpt begins mid-function; `chunk`, `free_list`, `word_size`
// and `we_did_split_a_chunk` are declared in the elided part above.
495 return NULL;
496 }
497
498 // Remove the chunk as the head of the list.
499 free_list->remove_chunk(chunk);
500
501 log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
502 p2i(free_list), free_list->count());
503
// Humongous requests are not served from the per-size freelists but from a
// dedicated dictionary keyed by word size.
504 } else {
505 chunk = humongous_dictionary()->get_chunk(word_size);
506
507 if (chunk == NULL) {
508 return NULL;
509 }
510
// A humongous chunk may be larger than requested; log the resulting waste.
511 log_trace(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
512 chunk->word_size(), word_size, chunk->word_size() - word_size);
513 }
514
515 // Chunk has been removed from the chunk manager; update counters.
516 account_for_removed_chunk(chunk);
// Mark in use (`true`); the symmetric call with `false` happens when a chunk
// is returned to the manager (see return_single_chunk further below).
517 do_update_in_use_info_for_chunk(chunk, true);
518 chunk->container()->inc_container_count();
519 chunk->inc_use_count();
520
521 // Remove it from the links to this freelist
522 chunk->set_next(NULL);
523 chunk->set_prev(NULL);
524
525 // Run some verifications (some more if we did a chunk split)
526 #ifdef ASSERT
527 if (VerifyMetaspace) {
528 locked_verify();
529 VirtualSpaceNode* const vsn = chunk->container();
530 vsn->verify();
// After a split, free neighbors should have been re-merged where possible.
531 if (we_did_split_a_chunk) {
532 vsn->verify_free_chunks_are_ideally_merged();
533 }
534 }
// NOTE(review): excerpt begins mid-function; `chunk` and `index` come from the
// elided part above.
599 log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
600 chunk_size_name(index), p2i(chunk), chunk->word_size());
601 }
602 chunk->container()->dec_container_count();
// Mark no longer in use (`false`), mirroring the `true` call made on hand-out.
603 do_update_in_use_info_for_chunk(chunk, false);
604
605 // Chunk has been added; update counters.
606 account_for_added_chunk(chunk);
607
608 // Attempt coalesce returned chunks with its neighboring chunks:
609 // if this chunk is small or special, attempt to coalesce to a medium chunk.
610 if (index == SmallIndex || index == SpecializedIndex) {
611 if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
612 // This did not work. But if this chunk is special, we still may form a small chunk?
613 if (index == SpecializedIndex) {
// Second, less ambitious attempt: merge specialized chunks into a small chunk.
614 if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
615 // give up.
616 }
617 }
618 }
619 }
620
621 }
622
// Returns a null-terminated list of chunks to the chunk manager. Each node's
// next pointer is captured before the chunk is handed back, because (per the
// comment inside the loop) return_chunk_at_head() rewires the links.
623 void ChunkManager::return_chunk_list(Metachunk* chunks) {
624 if (chunks == NULL) {
625 return;
626 }
627 LogTarget(Trace, gc, metaspace, freelist) log;
628 if (log.is_enabled()) { // tracing
629 log.print("returning list of chunks...");
630 }
// Running totals, presumably reported once the whole list has been walked —
// the tail of this loop is truncated in this excerpt, TODO confirm.
631 unsigned num_chunks_returned = 0;
632 size_t size_chunks_returned = 0;
633 Metachunk* cur = chunks;
634 while (cur != NULL) {
635 // Capture the next link before it is changed
636 // by the call to return_chunk_at_head();
637 Metachunk* next = cur->next();
638 if (log.is_enabled()) { // tracing
|
334 return free_chunks(index);
335 }
336
337 // Helper for chunk splitting: given a target chunk size and a larger free chunk,
338 // split up the larger chunk into n smaller chunks, at least one of which should be
339 // the target chunk of target chunk size. The smaller chunks, including the target
340 // chunk, are returned to the freelist. The pointer to the target chunk is returned.
341 // Note that this chunk is supposed to be removed from the freelist right away.
342 Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
343 assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
344
// Chunk-type index of the chunk being split, and of the smaller target chunk to carve out.
345 const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
346 const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
347
// The region being re-carved is the larger chunk's own memory: the Metachunk
// header lives at the start of the chunk, so region_start is both the address
// of the old header and the address where the new target chunk is built.
348 MetaWord* const region_start = (MetaWord*)larger_chunk;
349 const size_t region_word_len = larger_chunk->word_size();
350 MetaWord* const region_end = region_start + region_word_len;
351 VirtualSpaceNode* const vsn = larger_chunk->container();
352 OccupancyMap* const ocmap = vsn->occupancy_map();
353
// NOTE(review): commit() is called while the chunk is still on the freelist and
// before its memory is rewritten below — presumably so the wipe and the
// placement-new of the new headers land on committed memory. Confirm commit()
// covers the whole chunk region and is idempotent on already-committed chunks.
354 if (AutoUncommitMetaChunks) {
355 larger_chunk->commit();
356 }
357
358 // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
359 // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
360 // at an address suitable to place the smaller target chunk.
361 assert_is_aligned(region_start, target_chunk_word_size);
362
363 // Remove old chunk.
364 free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
365 larger_chunk->remove_sentinel();
366
367 // Prevent access to the old chunk from here on.
368 larger_chunk = NULL;
369 // ... and wipe it.
// Debug-only poison pattern (0xfe) over the whole old chunk, header included.
370 DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
371
372 // In its place create first the target chunk...
// Placement-new constructs the new Metachunk header in place at the region start.
373 MetaWord* p = region_start;
374 Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
375 assert(target_chunk == (Metachunk*)p, "Sanity");
376 target_chunk->set_origin(origin_split);
// NOTE(review): excerpt ends here — creation of the remaining smaller chunks
// and the function's return are not visible in this fragment.
// NOTE(review): excerpt begins mid-function; `chunk`, `free_list`, `word_size`
// and `we_did_split_a_chunk` are declared in the elided part above.
499 return NULL;
500 }
501
502 // Remove the chunk as the head of the list.
503 free_list->remove_chunk(chunk);
504
505 log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
506 p2i(free_list), free_list->count());
507
// Humongous requests are not served from the per-size freelists but from a
// dedicated dictionary keyed by word size.
508 } else {
509 chunk = humongous_dictionary()->get_chunk(word_size);
510
511 if (chunk == NULL) {
512 return NULL;
513 }
514
// A humongous chunk may be larger than requested; log the resulting waste.
515 log_trace(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
516 chunk->word_size(), word_size, chunk->word_size() - word_size);
517 }
518
// NOTE(review): with AutoUncommitMetaChunks, chunks apparently rest uncommitted
// while held by the chunk manager and are committed here on hand-out. This
// runs after both branches above, so freelist and humongous chunks are covered
// alike. Confirm commit() is safe on an already-committed chunk.
519 if (AutoUncommitMetaChunks) {
520 chunk->commit();
521 }
522
523 // Chunk has been removed from the chunk manager; update counters.
524 account_for_removed_chunk(chunk);
// Mark in use (`true`); the symmetric call with `false` happens when a chunk
// is returned to the manager (see return_single_chunk further below).
525 do_update_in_use_info_for_chunk(chunk, true);
526 chunk->container()->inc_container_count();
527 chunk->inc_use_count();
528
529 // Remove it from the links to this freelist
530 chunk->set_next(NULL);
531 chunk->set_prev(NULL);
532
533 // Run some verifications (some more if we did a chunk split)
534 #ifdef ASSERT
535 if (VerifyMetaspace) {
536 locked_verify();
537 VirtualSpaceNode* const vsn = chunk->container();
538 vsn->verify();
// After a split, free neighbors should have been re-merged where possible.
539 if (we_did_split_a_chunk) {
540 vsn->verify_free_chunks_are_ideally_merged();
541 }
542 }
// NOTE(review): excerpt begins mid-function; `chunk` and `index` come from the
// elided part above.
607 log_trace(gc, metaspace, freelist)("returned one %s chunk at " PTR_FORMAT " (word size " SIZE_FORMAT ") to freelist.",
608 chunk_size_name(index), p2i(chunk), chunk->word_size());
609 }
610 chunk->container()->dec_container_count();
// Mark no longer in use (`false`), mirroring the `true` call made on hand-out.
611 do_update_in_use_info_for_chunk(chunk, false);
612
613 // Chunk has been added; update counters.
614 account_for_added_chunk(chunk);
615
616 // Attempt coalesce returned chunks with its neighboring chunks:
617 // if this chunk is small or special, attempt to coalesce to a medium chunk.
618 if (index == SmallIndex || index == SpecializedIndex) {
619 if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
620 // This did not work. But if this chunk is special, we still may form a small chunk?
621 if (index == SpecializedIndex) {
// Second, less ambitious attempt: merge specialized chunks into a small chunk.
622 if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
623 // give up.
624 }
625 }
626 }
627 }
628
// NOTE(review): this uncommit runs *after* the coalesce attempts above. If a
// coalesce succeeded, this chunk may have been merged into a neighboring chunk
// (split_chunk shows merged-away headers get wiped in debug builds); verify
// that `chunk` is still a valid Metachunk to call uncommit() on here, and that
// uncommitting cannot release memory the merged chunk still covers.
629 if (AutoUncommitMetaChunks) {
630 chunk->uncommit();
631 }
632
633 }
634
// Returns a null-terminated list of chunks to the chunk manager. Each node's
// next pointer is captured before the chunk is handed back, because (per the
// comment inside the loop) return_chunk_at_head() rewires the links.
635 void ChunkManager::return_chunk_list(Metachunk* chunks) {
636 if (chunks == NULL) {
637 return;
638 }
639 LogTarget(Trace, gc, metaspace, freelist) log;
640 if (log.is_enabled()) { // tracing
641 log.print("returning list of chunks...");
642 }
// Running totals, presumably reported once the whole list has been walked —
// the tail of this loop is truncated in this excerpt, TODO confirm.
643 unsigned num_chunks_returned = 0;
644 size_t size_chunks_returned = 0;
645 Metachunk* cur = chunks;
646 while (cur != NULL) {
647 // Capture the next link before it is changed
648 // by the call to return_chunk_at_head();
649 Metachunk* next = cur->next();
650 if (log.is_enabled()) { // tracing
|