Skip to content

Commit e585914

Browse files
committed
Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging
virtio,pci,pc: bugfixes small fixes all over the place. Signed-off-by: Michael S. Tsirkin <[email protected]> # -----BEGIN PGP SIGNATURE----- # # iQFDBAABCgAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmiMzgoPHG1zdEByZWRo # YXQuY29tAAoJECgfDbjSjVRpAO4H+gKeZbkJFFPHBduwn/LyTTkBpEghy14wEp7G # 6y3knCkWXOVOnFJ/Lw1p6ZLtB6o547Ktin49msY+SKF2X33N1b6I0DmLxixnLVqP # fHMUF+/QssH7QdIMuZNTxr/nwdDzGnj6Rv4xVyrwdZlf+nQPE8GuXWPmAmyGwcXM # 1sEPTjZq30y2eRiQkKsgS7g+COqfPy+O3VeiyQWR1Q/Cb85alegGwUPBy289u3V+ # uHaBC6d73NWxRCHJM4J8CnWpY5LA+y/YgfJXys1NH8pzRLbTpiYt7gfUbfdHbIvF # IpjZraVh+ApbwXhQLmDmsHtGsyIE1zFlcZTq9pR6WUgYGUDQMpY= # =cJxn # -----END PGP SIGNATURE----- # gpg: Signature made Fri 01 Aug 2025 10:24:10 EDT # gpg: using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469 # gpg: issuer "[email protected]" # gpg: Good signature from "Michael S. Tsirkin <[email protected]>" [full] # gpg: aka "Michael S. Tsirkin <[email protected]>" [full] # Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67 # Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469 * tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: net/vdpa: fix potential fd leak in net_init_vhost_vdpa() MAINTAINERS: add net/vhost* files under `vhost` intel_iommu: Allow both Status Write and Interrupt Flag in QI wait tests/acpi: virt: update HEST file with its current data tests/qtest/bios-tables-test: extend to also check HEST table tests/acpi: virt: add an empty HEST file hw/i386/amd_iommu: Fix event log generation hw/i386/amd_iommu: Support MMIO writes to the status register hw/i386/amd_iommu: Fix amdvi_write*() hw/i386/amd_iommu: Move IOAPIC memory region initialization to the end hw/i386/amd_iommu: Remove unused and wrongly set ats_enabled field hw/i386/amd_iommu: Fix MMIO register write tracing pcie_sriov: Fix configuration and state synchronization virtio-net: Fix VLAN filter table reset timing vhost: Do not abort on log-stop error vhost: Do not abort on log-start error virtio: fix off-by-one and invalid access in virtqueue_ordered_fill Signed-off-by: Stefan Hajnoczi <[email protected]>
2 parents 2b290d6 + 4caf749 commit e585914

File tree

11 files changed

+140
-64
lines changed

11 files changed

+140
-64
lines changed

MAINTAINERS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2322,6 +2322,7 @@ F: include/*/vhost*
23222322
F: subprojects/libvhost-user/
23232323
F: block/export/vhost-user*
23242324
F: util/vhost-user-server.c
2325+
F: net/vhost*
23252326

23262327
vhost-shadow-virtqueue
23272328
R: Eugenio Pérez <[email protected]>

hw/i386/amd_iommu.c

Lines changed: 79 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -123,32 +123,47 @@ static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val)
123123
uint16_t romask = lduw_le_p(&s->romask[addr]);
124124
uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
125125
uint16_t oldval = lduw_le_p(&s->mmior[addr]);
126+
127+
uint16_t oldval_preserved = oldval & (romask | w1cmask);
128+
uint16_t newval_write = val & ~romask;
129+
uint16_t newval_w1c_set = val & w1cmask;
130+
126131
stw_le_p(&s->mmior[addr],
127-
((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
132+
(oldval_preserved | newval_write) & ~newval_w1c_set);
128133
}
129134

130135
static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
131136
{
132137
uint32_t romask = ldl_le_p(&s->romask[addr]);
133138
uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
134139
uint32_t oldval = ldl_le_p(&s->mmior[addr]);
140+
141+
uint32_t oldval_preserved = oldval & (romask | w1cmask);
142+
uint32_t newval_write = val & ~romask;
143+
uint32_t newval_w1c_set = val & w1cmask;
144+
135145
stl_le_p(&s->mmior[addr],
136-
((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
146+
(oldval_preserved | newval_write) & ~newval_w1c_set);
137147
}
138148

139149
static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
140150
{
141151
uint64_t romask = ldq_le_p(&s->romask[addr]);
142152
uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
143153
uint64_t oldval = ldq_le_p(&s->mmior[addr]);
154+
155+
uint64_t oldval_preserved = oldval & (romask | w1cmask);
156+
uint64_t newval_write = val & ~romask;
157+
uint64_t newval_w1c_set = val & w1cmask;
158+
144159
stq_le_p(&s->mmior[addr],
145-
((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
160+
(oldval_preserved | newval_write) & ~newval_w1c_set);
146161
}
147162

148-
/* OR a 64-bit register with a 64-bit value */
163+
/* AND a 64-bit register with a 64-bit value */
149164
static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val)
150165
{
151-
return amdvi_readq(s, addr) | val;
166+
return amdvi_readq(s, addr) & val;
152167
}
153168

154169
/* OR a 64-bit register with a 64-bit value storing result in the register */
@@ -177,19 +192,31 @@ static void amdvi_generate_msi_interrupt(AMDVIState *s)
177192
}
178193
}
179194

195+
static uint32_t get_next_eventlog_entry(AMDVIState *s)
196+
{
197+
uint32_t evtlog_size = s->evtlog_len * AMDVI_EVENT_LEN;
198+
return (s->evtlog_tail + AMDVI_EVENT_LEN) % evtlog_size;
199+
}
200+
180201
static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
181202
{
203+
uint32_t evtlog_tail_next;
204+
182205
/* event logging not enabled */
183206
if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS,
184207
AMDVI_MMIO_STATUS_EVT_OVF)) {
185208
return;
186209
}
187210

211+
evtlog_tail_next = get_next_eventlog_entry(s);
212+
188213
/* event log buffer full */
189-
if (s->evtlog_tail >= s->evtlog_len) {
190-
amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
191-
/* generate interrupt */
192-
amdvi_generate_msi_interrupt(s);
214+
if (evtlog_tail_next == s->evtlog_head) {
215+
/* generate overflow interrupt */
216+
if (s->evtlog_intr) {
217+
amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
218+
amdvi_generate_msi_interrupt(s);
219+
}
193220
return;
194221
}
195222

@@ -198,9 +225,13 @@ static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
198225
trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail);
199226
}
200227

201-
s->evtlog_tail += AMDVI_EVENT_LEN;
202-
amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
203-
amdvi_generate_msi_interrupt(s);
228+
s->evtlog_tail = evtlog_tail_next;
229+
amdvi_writeq_raw(s, AMDVI_MMIO_EVENT_TAIL, s->evtlog_tail);
230+
231+
if (s->evtlog_intr) {
232+
amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVENT_INT);
233+
amdvi_generate_msi_interrupt(s);
234+
}
204235
}
205236

206237
static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start,
@@ -592,18 +623,31 @@ static void amdvi_cmdbuf_run(AMDVIState *s)
592623
}
593624
}
594625

595-
static void amdvi_mmio_trace(hwaddr addr, unsigned size)
626+
static inline uint8_t amdvi_mmio_get_index(hwaddr addr)
596627
{
597628
uint8_t index = (addr & ~0x2000) / 8;
598629

599630
if ((addr & 0x2000)) {
600631
/* high table */
601632
index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index;
602-
trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07);
603633
} else {
604634
index = index >= AMDVI_MMIO_REGS_LOW ? AMDVI_MMIO_REGS_LOW : index;
605-
trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
606635
}
636+
637+
return index;
638+
}
639+
640+
static void amdvi_mmio_trace_read(hwaddr addr, unsigned size)
641+
{
642+
uint8_t index = amdvi_mmio_get_index(addr);
643+
trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
644+
}
645+
646+
static void amdvi_mmio_trace_write(hwaddr addr, unsigned size, uint64_t val)
647+
{
648+
uint8_t index = amdvi_mmio_get_index(addr);
649+
trace_amdvi_mmio_write(amdvi_mmio_low[index], addr, size, val,
650+
addr & ~0x07);
607651
}
608652

609653
static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
@@ -623,7 +667,7 @@ static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
623667
} else if (size == 8) {
624668
val = amdvi_readq(s, addr);
625669
}
626-
amdvi_mmio_trace(addr, size);
670+
amdvi_mmio_trace_read(addr, size);
627671

628672
return val;
629673
}
@@ -633,7 +677,6 @@ static void amdvi_handle_control_write(AMDVIState *s)
633677
unsigned long control = amdvi_readq(s, AMDVI_MMIO_CONTROL);
634678
s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN);
635679

636-
s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN);
637680
s->evtlog_enabled = s->enabled && !!(control &
638681
AMDVI_MMIO_CONTROL_EVENTLOGEN);
639682

@@ -704,9 +747,19 @@ static inline void amdvi_handle_excllim_write(AMDVIState *s)
704747
static inline void amdvi_handle_evtbase_write(AMDVIState *s)
705748
{
706749
uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE);
750+
751+
if (amdvi_readq(s, AMDVI_MMIO_STATUS) & AMDVI_MMIO_STATUS_EVENT_INT)
752+
/* Do not reset if eventlog interrupt bit is set*/
753+
return;
754+
707755
s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK;
708756
s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE)
709757
& AMDVI_MMIO_EVTLOG_SIZE_MASK);
758+
759+
/* clear tail and head pointer to 0 when event base is updated */
760+
s->evtlog_tail = s->evtlog_head = 0;
761+
amdvi_writeq_raw(s, AMDVI_MMIO_EVENT_HEAD, s->evtlog_head);
762+
amdvi_writeq_raw(s, AMDVI_MMIO_EVENT_TAIL, s->evtlog_tail);
710763
}
711764

712765
static inline void amdvi_handle_evttail_write(AMDVIState *s)
@@ -770,7 +823,7 @@ static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
770823
return;
771824
}
772825

773-
amdvi_mmio_trace(addr, size);
826+
amdvi_mmio_trace_write(addr, size, val);
774827
switch (addr & ~0x07) {
775828
case AMDVI_MMIO_CONTROL:
776829
amdvi_mmio_reg_write(s, size, val, addr);
@@ -835,6 +888,9 @@ static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
835888
amdvi_mmio_reg_write(s, size, val, addr);
836889
amdvi_handle_pprtail_write(s);
837890
break;
891+
case AMDVI_MMIO_STATUS:
892+
amdvi_mmio_reg_write(s, size, val, addr);
893+
break;
838894
}
839895
}
840896

@@ -1542,7 +1598,6 @@ static void amdvi_init(AMDVIState *s)
15421598
s->excl_allow = false;
15431599
s->mmio_enabled = false;
15441600
s->enabled = false;
1545-
s->ats_enabled = false;
15461601
s->cmdbuf_enabled = false;
15471602

15481603
/* reset MMIO */
@@ -1613,7 +1668,8 @@ static const VMStateDescription vmstate_amdvi_sysbus_migratable = {
16131668
/* Updated in amdvi_handle_control_write() */
16141669
VMSTATE_BOOL(enabled, AMDVIState),
16151670
VMSTATE_BOOL(ga_enabled, AMDVIState),
1616-
VMSTATE_BOOL(ats_enabled, AMDVIState),
1671+
/* bool ats_enabled is obsolete */
1672+
VMSTATE_UNUSED(1), /* was ats_enabled */
16171673
VMSTATE_BOOL(cmdbuf_enabled, AMDVIState),
16181674
VMSTATE_BOOL(completion_wait_intr, AMDVIState),
16191675
VMSTATE_BOOL(evtlog_enabled, AMDVIState),
@@ -1686,9 +1742,6 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp)
16861742
s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
16871743
amdvi_uint64_equal, g_free, g_free);
16881744

1689-
/* Pseudo address space under root PCI bus. */
1690-
x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID);
1691-
16921745
/* set up MMIO */
16931746
memory_region_init_io(&s->mr_mmio, OBJECT(s), &mmio_mem_ops, s,
16941747
"amdvi-mmio", AMDVI_MMIO_SIZE);
@@ -1711,6 +1764,9 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp)
17111764
memory_region_add_subregion_overlap(&s->mr_sys, AMDVI_INT_ADDR_FIRST,
17121765
&s->mr_ir, 1);
17131766

1767+
/* Pseudo address space under root PCI bus. */
1768+
x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID);
1769+
17141770
if (kvm_enabled() && x86ms->apic_id_limit > 255 && !s->xtsup) {
17151771
error_report("AMD IOMMU with x2APIC configuration requires xtsup=on");
17161772
exit(EXIT_FAILURE);

hw/i386/amd_iommu.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,7 @@
111111
#define AMDVI_MMIO_STATUS_CMDBUF_RUN (1 << 4)
112112
#define AMDVI_MMIO_STATUS_EVT_RUN (1 << 3)
113113
#define AMDVI_MMIO_STATUS_COMP_INT (1 << 2)
114+
#define AMDVI_MMIO_STATUS_EVENT_INT (1 << 1)
114115
#define AMDVI_MMIO_STATUS_EVT_OVF (1 << 0)
115116

116117
#define AMDVI_CMDBUF_ID_BYTE 0x07
@@ -322,7 +323,6 @@ struct AMDVIState {
322323
uint64_t mmio_addr;
323324

324325
bool enabled; /* IOMMU enabled */
325-
bool ats_enabled; /* address translation enabled */
326326
bool cmdbuf_enabled; /* command buffer enabled */
327327
bool evtlog_enabled; /* event log enabled */
328328
bool excl_enabled;

hw/i386/intel_iommu.c

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2828,6 +2828,7 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
28282828
{
28292829
uint64_t mask[4] = {VTD_INV_DESC_WAIT_RSVD_LO, VTD_INV_DESC_WAIT_RSVD_HI,
28302830
VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE};
2831+
bool ret = true;
28312832

28322833
if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false,
28332834
__func__, "wait")) {
@@ -2839,8 +2840,6 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
28392840
uint32_t status_data = (uint32_t)(inv_desc->lo >>
28402841
VTD_INV_DESC_WAIT_DATA_SHIFT);
28412842

2842-
assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));
2843-
28442843
/* FIXME: need to be masked with HAW? */
28452844
dma_addr_t status_addr = inv_desc->hi;
28462845
trace_vtd_inv_desc_wait_sw(status_addr, status_data);
@@ -2849,18 +2848,22 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
28492848
&status_data, sizeof(status_data),
28502849
MEMTXATTRS_UNSPECIFIED)) {
28512850
trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
2852-
return false;
2851+
ret = false;
28532852
}
2854-
} else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
2853+
}
2854+
2855+
if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
28552856
/* Interrupt flag */
28562857
vtd_generate_completion_event(s);
2857-
} else {
2858+
}
2859+
2860+
if (!(inv_desc->lo & (VTD_INV_DESC_WAIT_IF | VTD_INV_DESC_WAIT_SW))) {
28582861
error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64
28592862
" (unknown type)", __func__, inv_desc->hi,
28602863
inv_desc->lo);
28612864
return false;
28622865
}
2863-
return true;
2866+
return ret;
28642867
}
28652868

28662869
static bool vtd_process_context_cache_desc(IntelIOMMUState *s,

hw/net/virtio-net.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -929,8 +929,9 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
929929
vhost_net_save_acked_features(nc->peer);
930930
}
931931

932-
if (!virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
933-
memset(n->vlans, 0xff, MAX_VLAN >> 3);
932+
if (virtio_has_feature(vdev->guest_features ^ features, VIRTIO_NET_F_CTRL_VLAN)) {
933+
bool vlan = virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN);
934+
memset(n->vlans, vlan ? 0 : 0xff, MAX_VLAN >> 3);
934935
}
935936

936937
if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) {
@@ -3942,6 +3943,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
39423943
n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
39433944

39443945
n->vlans = g_malloc0(MAX_VLAN >> 3);
3946+
memset(n->vlans, 0xff, MAX_VLAN >> 3);
39453947

39463948
nc = qemu_get_queue(n->nic);
39473949
nc->rxfilter_notify_enabled = 1;
@@ -4041,7 +4043,6 @@ static void virtio_net_reset(VirtIODevice *vdev)
40414043
memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
40424044
memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
40434045
qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
4044-
memset(n->vlans, 0, MAX_VLAN >> 3);
40454046

40464047
/* Flush any async TX */
40474048
for (i = 0; i < n->max_queue_pairs; i++) {

hw/pci/pcie_sriov.c

Lines changed: 23 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,27 @@ static void unregister_vfs(PCIDevice *dev)
6464
pci_set_word(dev->wmask + dev->exp.sriov_cap + PCI_SRIOV_NUM_VF, 0xffff);
6565
}
6666

67+
static void consume_config(PCIDevice *dev)
68+
{
69+
uint8_t *cfg = dev->config + dev->exp.sriov_cap;
70+
71+
if (pci_get_word(cfg + PCI_SRIOV_CTRL) & PCI_SRIOV_CTRL_VFE) {
72+
register_vfs(dev);
73+
} else {
74+
uint8_t *wmask = dev->wmask + dev->exp.sriov_cap;
75+
uint16_t num_vfs = pci_get_word(cfg + PCI_SRIOV_NUM_VF);
76+
uint16_t wmask_val = PCI_SRIOV_CTRL_MSE | PCI_SRIOV_CTRL_ARI;
77+
78+
unregister_vfs(dev);
79+
80+
if (num_vfs <= pci_get_word(cfg + PCI_SRIOV_TOTAL_VF)) {
81+
wmask_val |= PCI_SRIOV_CTRL_VFE;
82+
}
83+
84+
pci_set_word(wmask + PCI_SRIOV_CTRL, wmask_val);
85+
}
86+
}
87+
6788
static bool pcie_sriov_pf_init_common(PCIDevice *dev, uint16_t offset,
6889
uint16_t vf_dev_id, uint16_t init_vfs,
6990
uint16_t total_vfs, uint16_t vf_offset,
@@ -416,30 +437,13 @@ void pcie_sriov_config_write(PCIDevice *dev, uint32_t address,
416437
trace_sriov_config_write(dev->name, PCI_SLOT(dev->devfn),
417438
PCI_FUNC(dev->devfn), off, val, len);
418439

419-
if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) {
420-
if (val & PCI_SRIOV_CTRL_VFE) {
421-
register_vfs(dev);
422-
} else {
423-
unregister_vfs(dev);
424-
}
425-
} else if (range_covers_byte(off, len, PCI_SRIOV_NUM_VF)) {
426-
uint8_t *cfg = dev->config + sriov_cap;
427-
uint8_t *wmask = dev->wmask + sriov_cap;
428-
uint16_t num_vfs = pci_get_word(cfg + PCI_SRIOV_NUM_VF);
429-
uint16_t wmask_val = PCI_SRIOV_CTRL_MSE | PCI_SRIOV_CTRL_ARI;
430-
431-
if (num_vfs <= pci_get_word(cfg + PCI_SRIOV_TOTAL_VF)) {
432-
wmask_val |= PCI_SRIOV_CTRL_VFE;
433-
}
434-
435-
pci_set_word(wmask + PCI_SRIOV_CTRL, wmask_val);
436-
}
440+
consume_config(dev);
437441
}
438442

439443
void pcie_sriov_pf_post_load(PCIDevice *dev)
440444
{
441445
if (dev->exp.sriov_cap) {
442-
register_vfs(dev);
446+
consume_config(dev);
443447
}
444448
}
445449

0 commit comments

Comments (0)