Commit f473264e authored by Dmitry Osipenko

virtio-gpu: Don't ignore virgl_renderer_resource_map() error


When virgl_renderer_resource_map() fails, we shouldn't use the
uninitialized data pointer and size, since doing so will fail or crash
QEMU. We also should record whether the mapping succeeded so that the
unmap path doesn't tear down an uninitialized memory region, which
would crash QEMU.

Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
parent c565cc1e
@@ -497,6 +497,8 @@ static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
     ret = virgl_renderer_resource_map(res->resource_id, &data, &size);
     if (ret) {
         g_print("Virgl blob resource map error: %s\n", strerror(-ret));
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
     }
     memory_region_init_ram_device_ptr(&res->region, OBJECT(g), NULL, size, data);
@@ -507,6 +509,8 @@ static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
     resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
     virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
     virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
+
+    res->mapped = true;
 }

 static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
@@ -532,11 +536,20 @@ static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
         return;
     }

+    if (!res->mapped) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already unmapped %d\n",
+                      __func__, ublob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
     memory_region_set_enabled(&res->region, false);
     memory_region_del_subregion(&g->parent_obj.hostmem, &res->region);
     object_unparent(OBJECT(&res->region));

     virgl_renderer_resource_unmap(ublob.resource_id);
+
+    res->mapped = false;
 }

 void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
...
@@ -57,6 +57,7 @@ struct virtio_gpu_simple_resource {
     uint8_t *remapped;

     MemoryRegion region;
+    bool mapped;
     QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
 };
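For illustration, here is a minimal, self-contained sketch of the pattern this patch applies: check the map call's return value before using its output pointer and size, and track a "mapped" flag so the teardown path never touches a region that was never set up. All names below (blob_resource, fake_map, map_blob, unmap_blob) are hypothetical stand-ins invented for this sketch, not QEMU or virglrenderer APIs.

/*
 * Hypothetical stand-alone demo of the map-error / mapped-flag pattern.
 * Build with: cc -std=c99 -Wall demo.c
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct blob_resource {
    void *data;      /* host mapping, valid only while mapped == true */
    uint64_t size;
    bool mapped;     /* plays the role of the new res->mapped field */
};

/* Stand-in for a map call that may fail; on failure it leaves its
 * output parameters untouched, i.e. uninitialized in the caller. */
static int fake_map(bool should_fail, void **data, uint64_t *size)
{
    static uint8_t backing[4096];

    if (should_fail) {
        return -EINVAL;
    }
    *data = backing;
    *size = sizeof(backing);
    return 0;
}

static int map_blob(struct blob_resource *res, bool should_fail)
{
    void *data;
    uint64_t size;
    int ret = fake_map(should_fail, &data, &size);

    if (ret) {
        /* Do NOT use data/size here; report the error and bail out. */
        fprintf(stderr, "blob map error: %s\n", strerror(-ret));
        return ret;
    }

    res->data = data;
    res->size = size;
    res->mapped = true;          /* set only on the success path */
    return 0;
}

static int unmap_blob(struct blob_resource *res)
{
    if (!res->mapped) {
        /* Guard against tearing down a mapping that was never created. */
        fprintf(stderr, "resource already unmapped\n");
        return -EINVAL;
    }

    res->data = NULL;
    res->size = 0;
    res->mapped = false;
    return 0;
}

int main(void)
{
    struct blob_resource res = { 0 };

    map_blob(&res, true);        /* fails: res stays unmapped */
    unmap_blob(&res);            /* rejected by the mapped guard */

    map_blob(&res, false);       /* succeeds */
    unmap_blob(&res);            /* clean teardown */
    return 0;
}

In the patch itself, the same two ideas appear as the early cmd->error/return in the error branch of virgl_cmd_resource_map_blob() and as the res->mapped guard around the memory region teardown in virgl_cmd_resource_unmap_blob().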