ring.c
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Simple descriptor-based ring. virtio 0.9 compatible event index is used for
 * signalling, unconditionally.
 */
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/* Next - Where next entry will be written.
 * Prev - "Next" value when event triggered previously.
 * Event - Peer requested event after writing this entry.
 */
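/* Worked example: with prev = 2 and next = 5, entries 2, 3 and 4 were
 * written since the last notification.  If the peer asked for an event
 * after entry 3 (event = 3): (5 - 3 - 1) = 1 < (5 - 2) = 3, so notify.
 * With event = 5 (not written yet): (5 - 5 - 1) = 0xffff is not < 3,
 * so no notification is needed.  The unsigned short arithmetic keeps
 * the comparison correct across 16 bit index wrap-around.
 */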
static inline bool need_event(unsigned short event,
			      unsigned short next,
			      unsigned short prev)
{
	return (unsigned short)(next - event - 1) < (unsigned short)(next - prev);
}

/* Design:
 * Guest adds descriptors with unique index values and DESC_HW in flags.
 * Host overwrites used descriptors with correct len, index, and DESC_HW clear.
 * Flags are always set last.
 */
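/* Illustrative lifecycle of one ring entry under this scheme:
 *   guest: fills addr and len, then sets flags = DESC_HW   (add_inbuf)
 *   host:  polls for DESC_HW, consumes the buffer, updates len,
 *          then clears flags                                (use_buf)
 *   guest: sees DESC_HW clear and reclaims the entry        (get_buf)
 * Writing flags last on both sides is what the release/acquire barrier
 * pairs (A and B) below enforce.
 */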
#define DESC_HW 0x1

struct desc {
	unsigned short flags;
	unsigned short index;
	unsigned len;
	unsigned long long addr;
};
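/* Note: with natural alignment on a typical 64 bit ABI this is 16 bytes,
 * so four descriptors share one 64 byte cache line.
 */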

/* how much padding is needed to avoid false cache sharing */
#define HOST_GUEST_PADDING 0x80

/* Mostly read */
struct event {
	unsigned short kick_index;
	unsigned char reserved0[HOST_GUEST_PADDING - 2];
	unsigned short call_index;
	unsigned char reserved1[HOST_GUEST_PADDING - 2];
};
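/* kick_index is written by the host (enable_kick) and read by the guest
 * (kick_available); call_index is written by the guest (enable_call) and
 * read by the host (call_used).  The reserved padding keeps each index in
 * its own HOST_GUEST_PADDING sized region, so the two sides never write
 * the same cache line.
 */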

struct data {
	void *buf; /* descriptor is writeable, we can't get buf from there */
	void *data;
} *data;

struct desc *ring;
struct event *event;

struct guest {
	unsigned avail_idx;
	unsigned last_used_idx;
	unsigned num_free;
	unsigned kicked_avail_idx;
	unsigned char reserved[HOST_GUEST_PADDING - 12];
} guest;

struct host {
	/* we do not need to track last avail index
	 * unless we have more than one in flight.
	 */
	unsigned used_idx;
	unsigned called_used_idx;
	unsigned char reserved[HOST_GUEST_PADDING - 4];
} host;

/* implemented by ring */
void alloc_ring(void)
{
	int ret;
	int i;

	ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof *ring);
	if (ret) {
		perror("Unable to allocate ring buffer.\n");
		exit(3);
	}
	event = calloc(1, sizeof(*event));
	if (!event) {
		perror("Unable to allocate event buffer.\n");
		exit(3);
	}
	guest.avail_idx = 0;
	guest.kicked_avail_idx = -1;
	guest.last_used_idx = 0;
	host.used_idx = 0;
	host.called_used_idx = -1;
	for (i = 0; i < ring_size; ++i) {
		struct desc desc = {
			.index = i,
		};
		ring[i] = desc;
	}
	guest.num_free = ring_size;
	data = calloc(ring_size, sizeof(*data));
	if (!data) {
		perror("Unable to allocate data buffer.\n");
		exit(3);
	}
}

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	unsigned head, index;

	if (!guest.num_free)
		return -1;

	guest.num_free--;
	head = (ring_size - 1) & (guest.avail_idx++);

	/* Start with a write. On MESI architectures this helps
	 * avoid a shared state with consumer that is polling this descriptor.
	 */
	ring[head].addr = (unsigned long)(void*)buf;
	ring[head].len = len;
	/* read below might bypass write above. That is OK because it's just an
	 * optimization. If this happens, we will get the cache line in a
	 * shared state which is unfortunate, but probably not worth it to
	 * add an explicit full barrier to avoid this.
	 */
	barrier();
	index = ring[head].index;
	data[index].buf = buf;
	data[index].data = datap;
	/* Barrier A (for pairing) */
	smp_release();
	ring[head].flags = DESC_HW;

	return 0;
}

void *get_buf(unsigned *lenp, void **bufp)
{
	unsigned head = (ring_size - 1) & guest.last_used_idx;
	unsigned index;
	void *datap;

	if (ring[head].flags & DESC_HW)
		return NULL;
	/* Barrier B (for pairing) */
	smp_acquire();
	*lenp = ring[head].len;
	index = ring[head].index & (ring_size - 1);
	datap = data[index].data;
	*bufp = data[index].buf;
	data[index].buf = NULL;
	data[index].data = NULL;
	guest.num_free++;
	guest.last_used_idx++;
	return datap;
}

bool used_empty(void)
{
	unsigned head = (ring_size - 1) & guest.last_used_idx;

	return (ring[head].flags & DESC_HW);
}

void disable_call(void)
{
	/* Doing nothing to disable calls might cause
	 * extra interrupts, but reduces the number of cache misses.
	 */
}

bool enable_call(void)
{
	event->call_index = guest.last_used_idx;
	/* Flush call index write */
	/* Barrier D (for pairing) */
	smp_mb();
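	/* Re-check after publishing call_index: the host may have completed
	 * a buffer before seeing the new index, in which case it will not
	 * send a call for it.
	 */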
	return used_empty();
}

void kick_available(void)
{
	bool need;

	/* Flush in previous flags write */
	/* Barrier C (for pairing) */
	smp_mb();
	need = need_event(event->kick_index,
			   guest.avail_idx,
			   guest.kicked_avail_idx);

	guest.kicked_avail_idx = guest.avail_idx;
	if (need)
		kick();
}

/* host side */
void disable_kick(void)
{
	/* Doing nothing to disable kicks might cause
	 * extra interrupts, but reduces the number of cache misses.
	 */
}

bool enable_kick(void)
{
	event->kick_index = host.used_idx;
	/* Barrier C (for pairing) */
	smp_mb();
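	/* Same pattern as enable_call(): re-check after publishing kick_index
	 * so a descriptor added concurrently by the guest is not missed.
	 */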
	return avail_empty();
}

bool avail_empty(void)
{
	unsigned head = (ring_size - 1) & host.used_idx;

	return !(ring[head].flags & DESC_HW);
}

bool use_buf(unsigned *lenp, void **bufp)
{
	unsigned head = (ring_size - 1) & host.used_idx;

	if (!(ring[head].flags & DESC_HW))
		return false;

	/* make sure length read below is not speculated */
	/* Barrier A (for pairing) */
	smp_acquire();

	/* simple in-order completion: we don't need
	 * to touch index at all. This also means we
	 * can just modify the descriptor in-place.
	 */
	ring[head].len--;
	/* Make sure len is valid before flags.
	 * Note: alternative is to write len and flags in one access -
	 * possible on 64 bit architectures but wmb is free on Intel anyway
	 * so I have no way to test whether it's a gain.
	 */
	/* Barrier B (for pairing) */
	smp_release();
	ring[head].flags = 0;
	host.used_idx++;
	return true;
}

void call_used(void)
{
	bool need;

	/* Flush in previous flags write */
	/* Barrier D (for pairing) */
	smp_mb();

	need = need_event(event->call_index,
			host.used_idx,
			host.called_used_idx);

	host.called_used_idx = host.used_idx;

	if (need)
		call();
}