@@ -522,6 +522,7 @@ static inline void page_unlock_tb(struct uc_struct *uc, const TranslationBlock *
     }
 }
 
+#if 0
 static inline struct page_entry *
 page_entry_new(PageDesc *pd, tb_page_addr_t index)
 {
@@ -542,7 +543,6 @@ static void page_entry_destroy(gpointer p)
     g_free(pe);
 }
 
-#if 0
 /* returns false on success */
 static bool page_entry_trylock(struct page_entry *pe)
 {
@@ -582,7 +582,6 @@ static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
     }
     return FALSE;
 }
-#endif
 
 /*
  * Trylock a page, and if successful, add the page to a collection.
@@ -613,20 +612,14 @@ static bool page_trylock_add(struct uc_struct *uc, struct page_collection *set,
      */
     if (set->max == NULL || pe->index > set->max->index) {
         set->max = pe;
-#if 0
         do_page_entry_lock(pe);
-#endif
         return false;
     }
     /*
      * Try to acquire out-of-order lock; if busy, return busy so that we acquire
      * locks in order.
      */
-#if 0
     return page_entry_trylock(pe);
-#else
-    return 0;
-#endif
 }
 
 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
@@ -641,6 +634,7 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
     }
     return 1;
 }
+#endif
 
 /*
  * Lock a range of pages ([@start,@end[) as well as the pages of all
@@ -650,6 +644,7 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
 struct page_collection *
 page_collection_lock(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end)
 {
+#if 0
     struct page_collection *set = g_malloc(sizeof(*set));
     tb_page_addr_t index;
     PageDesc *pd;
@@ -664,9 +659,7 @@ page_collection_lock(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t
     assert_no_pages_locked();
 
  retry:
-#if 0
     g_tree_foreach(set->tree, page_entry_lock, NULL);
-#endif
 
     for (index = start; index <= end; index++) {
         TranslationBlock *tb;
@@ -677,9 +670,7 @@ page_collection_lock(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t
             continue;
         }
         if (page_trylock_add(uc, set, index << TARGET_PAGE_BITS)) {
-#if 0
             g_tree_foreach(set->tree, page_entry_unlock, NULL);
-#endif
             goto retry;
         }
         assert_page_locked(pd);
@@ -688,21 +679,24 @@ page_collection_lock(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t
                 (tb->page_addr[1] != -1 &&
                  page_trylock_add(uc, set, tb->page_addr[1]))) {
                 /* drop all locks, and reacquire in order */
-#if 0
                 g_tree_foreach(set->tree, page_entry_unlock, NULL);
-#endif
                 goto retry;
             }
         }
     }
     return set;
+#else
+    return NULL;
+#endif
 }
 
 void page_collection_unlock(struct page_collection *set)
 {
+#if 0
     /* entries are unlocked and freed via page_entry_destroy */
     g_tree_destroy(set->tree);
     g_free(set);
+#endif
 }
 
 static void page_lock_pair(struct uc_struct *uc, PageDesc **ret_p1, tb_page_addr_t phys1,
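Note on the pattern above (a reviewer's sketch, not part of the patch): rather than deleting QEMU's page-locking machinery outright, the change compiles it out with #if 0 and stubs the two entry points that the rest of translate-all.c still calls, so no call sites need to change. Below is a minimal standalone illustration of that stubbing pattern; the file name is hypothetical and the signatures are simplified (the real functions take the uc/start/end arguments shown in the hunks).

    /* sketch.c - illustrative only; names mirror the diff above,
     * but this is not the actual unicorn source. */
    #include <stddef.h>

    struct page_collection;  /* opaque: the real layout is compiled out */

    struct page_collection *page_collection_lock(void)
    {
    #if 0
        /* real body: allocate the set, then trylock every page in [start, end] */
    #endif
        return NULL;  /* with locking compiled out, there is nothing to track */
    }

    void page_collection_unlock(struct page_collection *set)
    {
    #if 0
        g_tree_destroy(set->tree);  /* entries freed via page_entry_destroy */
        g_free(set);
    #endif
        (void)set;  /* no-op: page_collection_lock() never hands out a set */
    }

Keeping the dead code under #if 0 instead of deleting it presumably keeps future re-syncs against upstream QEMU smaller, at the cost of some preprocessor noise.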