@@ -650,6 +650,7 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
 struct page_collection *
 page_collection_lock(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end)
 {
+#if 0
     struct page_collection *set = g_malloc(sizeof(*set));
     tb_page_addr_t index;
     PageDesc *pd;
@@ -664,9 +665,7 @@ page_collection_lock(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t
     assert_no_pages_locked();

 retry:
-#if 0
     g_tree_foreach(set->tree, page_entry_lock, NULL);
-#endif

     for (index = start; index <= end; index++) {
         TranslationBlock *tb;
@@ -677,9 +676,7 @@ page_collection_lock(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t
             continue;
         }
         if (page_trylock_add(uc, set, index << TARGET_PAGE_BITS)) {
-#if 0
             g_tree_foreach(set->tree, page_entry_unlock, NULL);
-#endif
             goto retry;
         }
         assert_page_locked(pd);
@@ -688,21 +685,24 @@ page_collection_lock(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t
                 (tb->page_addr[1] != -1 &&
                  page_trylock_add(uc, set, tb->page_addr[1]))) {
                 /* drop all locks, and reacquire in order */
-#if 0
                 g_tree_foreach(set->tree, page_entry_unlock, NULL);
-#endif
                 goto retry;
             }
         }
     }
     return set;
+#else
+    return NULL;
+#endif
 }

 void page_collection_unlock(struct page_collection *set)
 {
+#if 0
     /* entries are unlocked and freed via page_entry_destroy */
     g_tree_destroy(set->tree);
     g_free(set);
+#endif
 }

 static void page_lock_pair(struct uc_struct *uc, PageDesc **ret_p1, tb_page_addr_t phys1,
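
Net effect of the hunks above: instead of wrapping only the g_tree_foreach() calls in #if 0, the entire body of page_collection_lock() is now compiled out, so the function returns NULL, and page_collection_unlock() becomes a no-op. A minimal caller sketch of what that means in this build; the call-site names here are illustrative, not taken from the diff:

    /* page_collection_lock() yields NULL in this configuration */
    struct page_collection *pages = page_collection_lock(uc, start, end);

    /* ... invalidate TBs in the [start, end] range without per-page locking ... */

    /* safe even with pages == NULL: the g_tree_destroy()/g_free() body
     * is compiled out, so the argument is never dereferenced */
    page_collection_unlock(pages);

Existing call sites therefore keep compiling and running unmodified, as long as they treat the returned set as opaque and never dereference it.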