Commit 3df4d692 authored by Ilia Mirkin

nouveau: add ARB_buffer_storage support

Signed-off-by: Ilia Mirkin <imirkin@alum.mit.edu>
parent b0d02db7
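
For context: ARB_buffer_storage introduces immutable buffer storage and, with it, persistent and coherent mappings, where the application keeps a buffer mapped while the GPU reads or writes it. A minimal client-side sketch of the pattern this commit enables, assuming a current GL 4.4 context (or the extension); the target, size, and flag choices are illustrative:

    GLuint buf;
    void *map;

    glGenBuffers(1, &buf);
    glBindBuffer(GL_ARRAY_BUFFER, buf);
    /* Immutable storage, mappable for writing, persistently and coherently. */
    glBufferStorage(GL_ARRAY_BUFFER, 65536, NULL,
                    GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT |
                    GL_MAP_COHERENT_BIT);
    map = glMapBufferRange(GL_ARRAY_BUFFER, 0, 65536,
                           GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT |
                           GL_MAP_COHERENT_BIT);
    /* "map" stays valid across draw calls; no unmap until teardown. */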
@@ -342,6 +342,8 @@ nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage)
       return FALSE;
    if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
       return FALSE;
+   if (unlikely(usage & PIPE_TRANSFER_PERSISTENT))
+      return FALSE;
    return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
 }
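
The check above exists because "discard" replaces a busy backing BO with a fresh allocation; for a persistently mapped buffer that would invalidate the pointer the application still holds. A hedged illustration of the contract being protected (names hypothetical):

    /* Hypothetical: p must remain valid until glUnmapBuffer(), so the
     * driver may not reallocate the storage behind the application. */
    void *p = glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
                               GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
    memcpy(p, data, size); /* may overlap GPU access, by design */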
@@ -402,6 +404,9 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
        !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
       usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;
 
+   if (usage & PIPE_TRANSFER_PERSISTENT)
+      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+
    if (buf->domain == NOUVEAU_BO_VRAM) {
       if (usage & NOUVEAU_TRANSFER_DISCARD) {
          /* Set up a staging area for the user to write to. It will be copied
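
Persistent maps are forced to be unsynchronized: the driver no longer stalls until the GPU is done with the buffer, because ARB_buffer_storage makes that ordering the application's job. A sketch of the fence idiom applications use instead (the GL calls are real; the flow around them is illustrative):

    /* After submitting draws that read the mapped range: */
    GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

    /* ... later, before the CPU rewrites that range ... */
    glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
    glDeleteSync(fence);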
@@ -645,8 +650,11 @@ nouveau_buffer_create(struct pipe_screen *pscreen,
    pipe_reference_init(&buffer->base.reference, 1);
    buffer->base.screen = pscreen;
 
-   if (buffer->base.bind &
-       (screen->vidmem_bindings & screen->sysmem_bindings)) {
+   if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
+                             PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
+      buffer->domain = NOUVEAU_BO_GART;
+   } else if (buffer->base.bind &
+              (screen->vidmem_bindings & screen->sysmem_bindings)) {
       switch (buffer->base.usage) {
       case PIPE_USAGE_DEFAULT:
       case PIPE_USAGE_IMMUTABLE:
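
Persistent/coherent buffers are pinned to GART: a VRAM placement could be migrated or go through staging copies, either of which would break a long-lived CPU mapping. A sketch of how such a resource reaches this code from the Gallium side; the helper is hypothetical, but the pipe_resource fields and flags are the real interface:

    /* Hypothetical helper: request a persistently mappable vertex buffer. */
    static struct pipe_resource *
    create_persistent_vbo(struct pipe_screen *screen, unsigned size)
    {
       struct pipe_resource tmpl;

       memset(&tmpl, 0, sizeof(tmpl));
       tmpl.target = PIPE_BUFFER;
       tmpl.format = PIPE_FORMAT_R8_UNORM;
       tmpl.bind = PIPE_BIND_VERTEX_BUFFER;
       tmpl.usage = PIPE_USAGE_DYNAMIC;
       tmpl.flags = PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                    PIPE_RESOURCE_FLAG_MAP_COHERENT;
       tmpl.width0 = size;
       tmpl.height0 = 1;
       tmpl.depth0 = 1;
       tmpl.array_size = 1;

       /* With the change above, nouveau picks NOUVEAU_BO_GART here. */
       return screen->resource_create(screen, &tmpl);
    }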
@@ -31,6 +31,26 @@
 #include "nv30/nv30_resource.h"
 #include "nv30/nv30_transfer.h"
 
+static void
+nv30_memory_barrier(struct pipe_context *pipe, unsigned flags)
+{
+   struct nv30_context *nv30 = nv30_context(pipe);
+   int i;
+
+   if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
+      for (i = 0; i < nv30->num_vtxbufs; ++i) {
+         if (!nv30->vtxbuf[i].buffer)
+            continue;
+         if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+            nv30->base.vbo_dirty = TRUE;
+      }
+
+      if (nv30->idxbuf.buffer &&
+          nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+         nv30->base.vbo_dirty = TRUE;
+   }
+}
+
 static struct pipe_resource *
 nv30_resource_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *tmpl)
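
nv30_memory_barrier implements the PIPE_BARRIER_MAPPED_BUFFER flag, which Mesa's GL state tracker raises for glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT): any bound persistently mapped vertex or index buffer may have changed behind the GPU's back, so vertex state is marked dirty and the vertex cache is invalidated at the next draw. A hedged sketch of the application-side sequence that ends up here (assumes "map" is a persistent, non-coherent mapping of the bound VBO):

    memcpy(map, vertices, sizeof(vertices));              /* CPU write */
    glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT); /* reaches this hook */
    glDrawArrays(GL_TRIANGLES, 0, 3);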
@@ -75,4 +95,5 @@ nv30_resource_init(struct pipe_context *pipe)
    pipe->resource_copy_region = nv30_resource_copy_region;
    pipe->blit = nv30_blit;
    pipe->flush_resource = nv30_flush_resource;
+   pipe->memory_barrier = nv30_memory_barrier;
 }
@@ -79,6 +79,7 @@ nv30_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
    case PIPE_CAP_TGSI_TEXCOORD:
    case PIPE_CAP_USER_CONSTANT_BUFFERS:
    case PIPE_CAP_USER_INDEX_BUFFERS:
+   case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
       return 1;
    case PIPE_CAP_USER_VERTEX_BUFFERS:
       return 0;
@@ -132,7 +133,6 @@ nv30_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
    case PIPE_CAP_TGSI_VS_LAYER:
    case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
    case PIPE_CAP_TEXTURE_GATHER_SM5:
-   case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
    case PIPE_CAP_FAKE_SW_MSAA:
    case PIPE_CAP_TEXTURE_QUERY_LOD:
    case PIPE_CAP_SAMPLE_SHADING:
@@ -545,6 +545,7 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
 {
    struct nv30_context *nv30 = nv30_context(pipe);
    struct nouveau_pushbuf *push = nv30->base.pushbuf;
+   int i;
 
    /* For picking only a few vertices from a large user buffer, push is better,
     * if index count is larger and we expect repeated vertices, suggest upload.
@@ -573,6 +574,17 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
       return;
    }
 
+   for (i = 0; i < nv30->num_vtxbufs && !nv30->base.vbo_dirty; ++i) {
+      if (!nv30->vtxbuf[i].buffer)
+         continue;
+      if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+         nv30->base.vbo_dirty = TRUE;
+   }
+
+   if (!nv30->base.vbo_dirty && nv30->idxbuf.buffer &&
+       nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+      nv30->base.vbo_dirty = TRUE;
+
    if (nv30->base.vbo_dirty) {
       BEGIN_NV04(push, NV30_3D(VTX_CACHE_INVALIDATE_1710), 1);
       PUSH_DATA (push, 0);
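
Coherent mappings involve no barrier call at all, so the driver cannot tell when the CPU last wrote; the per-draw loop above therefore conservatively marks vertex state dirty whenever a coherent buffer is bound, and the VTX_CACHE_INVALIDATE_1710 method then flushes the vertex cache. A sketch of the streaming pattern that depends on this, with hypothetical names and sizes:

    /* Hypothetical streaming loop over a coherent persistent map "map";
     * no glMemoryBarrier is needed between the write and the draw. */
    for (unsigned frame = 0; frame < num_frames; ++frame) {
       unsigned off = (frame % 3) * chunk_size;  /* triple-buffered ring */
       memcpy((char *)map + off, cpu_verts[frame], chunk_size);
       glDrawArrays(GL_TRIANGLES, off / vertex_size, verts_per_chunk);
       /* A real application would fence each chunk before reuse. */
    }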
@@ -57,6 +57,26 @@ nv50_texture_barrier(struct pipe_context *pipe)
    PUSH_DATA (push, 0x20);
 }
 
+static void
+nv50_memory_barrier(struct pipe_context *pipe, unsigned flags)
+{
+   struct nv50_context *nv50 = nv50_context(pipe);
+   int i;
+
+   if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
+      for (i = 0; i < nv50->num_vtxbufs; ++i) {
+         if (!nv50->vtxbuf[i].buffer)
+            continue;
+         if (nv50->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+            nv50->base.vbo_dirty = TRUE;
+      }
+
+      if (nv50->idxbuf.buffer &&
+          nv50->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+         nv50->base.vbo_dirty = TRUE;
+   }
+}
+
 void
 nv50_default_kick_notify(struct nouveau_pushbuf *push)
 {
@@ -249,6 +269,7 @@ nv50_create(struct pipe_screen *pscreen, void *priv)
    pipe->flush = nv50_flush;
    pipe->texture_barrier = nv50_texture_barrier;
+   pipe->memory_barrier = nv50_memory_barrier;
    pipe->get_sample_position = nv50_context_get_sample_position;
 
    if (!screen->cur_ctx) {
@@ -106,6 +106,7 @@ nv50_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
    case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
    case PIPE_CAP_ANISOTROPIC_FILTER:
    case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
+   case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
       return 1;
    case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
       return 65536;
@@ -196,7 +197,6 @@ nv50_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
       return PIPE_ENDIAN_LITTLE;
    case PIPE_CAP_TGSI_VS_LAYER:
    case PIPE_CAP_TEXTURE_GATHER_SM5:
-   case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
    case PIPE_CAP_FAKE_SW_MSAA:
       return 0;
    case PIPE_CAP_MAX_VIEWPORTS:
@@ -747,6 +747,7 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
 {
    struct nv50_context *nv50 = nv50_context(pipe);
    struct nouveau_pushbuf *push = nv50->base.pushbuf;
+   int i;
 
    /* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
    nv50->vb_elt_first = info->min_index + info->index_bias;
@@ -789,6 +790,17 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
       PUSH_DATA (push, info->start_instance);
    }
 
+   for (i = 0; i < nv50->num_vtxbufs && !nv50->base.vbo_dirty; ++i) {
+      if (!nv50->vtxbuf[i].buffer)
+         continue;
+      if (nv50->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+         nv50->base.vbo_dirty = TRUE;
+   }
+
+   if (!nv50->base.vbo_dirty && nv50->idxbuf.buffer &&
+       nv50->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+      nv50->base.vbo_dirty = TRUE;
+
    if (nv50->base.vbo_dirty) {
       BEGIN_NV04(push, NV50_3D(VERTEX_ARRAY_FLUSH), 1);
       PUSH_DATA (push, 0);
@@ -56,6 +56,26 @@ nvc0_texture_barrier(struct pipe_context *pipe)
    IMMED_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 0);
 }
 
+static void
+nvc0_memory_barrier(struct pipe_context *pipe, unsigned flags)
+{
+   struct nvc0_context *nvc0 = nvc0_context(pipe);
+   int i;
+
+   if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
+      for (i = 0; i < nvc0->num_vtxbufs; ++i) {
+         if (!nvc0->vtxbuf[i].buffer)
+            continue;
+         if (nvc0->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+            nvc0->base.vbo_dirty = TRUE;
+      }
+
+      if (nvc0->idxbuf.buffer &&
+          nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
+         nvc0->base.vbo_dirty = TRUE;
+   }
+}
+
 static void
 nvc0_context_unreference_resources(struct nvc0_context *nvc0)
 {
@@ -264,6 +284,7 @@ nvc0_create(struct pipe_screen *pscreen, void *priv)
    pipe->flush = nvc0_flush;
    pipe->texture_barrier = nvc0_texture_barrier;
+   pipe->memory_barrier = nvc0_memory_barrier;
    pipe->get_sample_position = nvc0_context_get_sample_position;
 
    if (!screen->cur_ctx) {
@@ -152,6 +152,7 @@ nvc0_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
    case PIPE_CAP_TEXTURE_BARRIER:
    case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
    case PIPE_CAP_START_INSTANCE:
+   case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
       return 1;
    case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
       return 0; /* state trackers will know better */
@@ -179,7 +180,6 @@ nvc0_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
       return PIPE_ENDIAN_LITTLE;
    case PIPE_CAP_TGSI_VS_LAYER:
    case PIPE_CAP_TEXTURE_GATHER_SM5:
-   case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
    case PIPE_CAP_FAKE_SW_MSAA:
       return 0;
    case PIPE_CAP_MAX_VIEWPORTS:
@@ -797,6 +797,7 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
 {
    struct nvc0_context *nvc0 = nvc0_context(pipe);
    struct nouveau_pushbuf *push = nvc0->base.pushbuf;
+   int i;
 
    /* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
    nvc0->vb_elt_first = info->min_index + info->index_bias;
@@ -846,6 +847,17 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
       PUSH_DATA (push, info->start_instance);
    }
 
+   for (i = 0; i < nvc0->num_vtxbufs && !nvc0->base.vbo_dirty; ++i) {
+      if (!nvc0->vtxbuf[i].buffer)
+         continue;
+      if (nvc0->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+         nvc0->base.vbo_dirty = TRUE;
+   }
+
+   if (!nvc0->base.vbo_dirty && nvc0->idxbuf.buffer &&
+       nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+      nvc0->base.vbo_dirty = TRUE;
+
    if (nvc0->base.vbo_dirty) {
       IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
       nvc0->base.vbo_dirty = FALSE;