From 43cf2ed35c20ad63adb9b5e708e24a492254801d Mon Sep 17 00:00:00 2001
From: tabudz
Date: Thu, 20 Feb 2025 23:05:56 +0800
Subject: [PATCH] softmmu: Always initialize xlat in address_space_translate_for_iotlb

The bug is an uninitialized memory read, along the translate_fail
path, which results in garbage being read from iotlb_to_section,
which can lead to a crash in io_readx/io_writex.

The bug may be fixed by writing any value with zero
in ~TARGET_PAGE_MASK, so that the call to iotlb_to_section using
the xlat'ed address returns io_mem_unassigned, as desired by the
translate_fail path.

It is most useful to record the original physical page address,
which will eventually be logged by memory_region_access_valid
when the access is rejected by unassigned_mem_accepts.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1065
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-Id: <20220621153829.366423-1-richard.henderson@linaro.org>
---
 qemu/exec.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/qemu/exec.c b/qemu/exec.c
index 9786b19557..166e34bad4 100644
--- a/qemu/exec.c
+++ b/qemu/exec.c
@@ -487,7 +487,7 @@ MemoryRegion *flatview_translate(struct uc_struct *uc, FlatView *fv, hwaddr addr
 
 /* Called from RCU critical section */
 MemoryRegionSection *
-address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
+address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
                                   hwaddr *xlat, hwaddr *plen,
                                   MemTxAttrs attrs, int *prot)
 {
@@ -496,6 +496,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
     IOMMUMemoryRegionClass *imrc;
     IOMMUTLBEntry iotlb;
     int iommu_idx;
+    hwaddr addr = orig_addr;
     AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
 
     for (;;) {
@@ -551,6 +552,16 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
     return section;
 
 translate_fail:
+    /*
+     * We should be given a page-aligned address -- certainly
+     * tlb_set_page_with_attrs() does so.  The page offset of xlat
+     * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0.
+     * The page portion of xlat will be logged by memory_region_access_valid()
+     * when this memory access is rejected, so use the original untranslated
+     * physical address.
+     */
+    assert((orig_addr & ~TARGET_PAGE_MASK) == 0);
+    *xlat = orig_addr;
     return &d->map.sections[PHYS_SECTION_UNASSIGNED];
 }
 
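For illustration only, a minimal standalone C sketch of why the page-offset
bits of *xlat matter on the translate_fail path. It models just the indexing
that iotlb_to_section() performs (masking the iotlb value with
~TARGET_PAGE_MASK and using the result as an index into d->map.sections[],
where entry 0, PHYS_SECTION_UNASSIGNED, is io_mem_unassigned); the table
size, section names and the example address below are made-up stand-ins, not
QEMU definitions.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative constants -- not the real QEMU macros. */
    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK (~(uint64_t)((1u << TARGET_PAGE_BITS) - 1))

    enum { PHYS_SECTION_UNASSIGNED = 0, NUM_SECTIONS = 4096 };

    /* Stand-in for d->map.sections[]; entry 0 plays io_mem_unassigned. */
    static const char *sections[NUM_SECTIONS] = {
        [PHYS_SECTION_UNASSIGNED] = "io_mem_unassigned",
    };

    /* Rough analogue of iotlb_to_section(): index by the page-offset bits. */
    static const char *iotlb_to_section_model(uint64_t xlat)
    {
        return sections[xlat & ~TARGET_PAGE_MASK];
    }

    int main(void)
    {
        uint64_t orig_addr = 0xdeadb000;  /* page-aligned guest-physical addr */

        /*
         * Before the fix, *xlat was left uninitialized on translate_fail, so
         * the index above could be any page offset -- a garbage section, and
         * potentially a crash in io_readx()/io_writex().  With the fix,
         * *xlat = orig_addr, whose low bits are zero, so the lookup lands on
         * PHYS_SECTION_UNASSIGNED and the access is rejected cleanly.
         */
        uint64_t xlat = orig_addr;
        assert((xlat & ~TARGET_PAGE_MASK) == 0);
        puts(iotlb_to_section_model(xlat)); /* prints "io_mem_unassigned" */
        return 0;
    }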