Gallium 3D: softpipe driver

git-svn-id: svn://kolibrios.org@3772 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2013-07-06 12:52:42 +00:00
parent c67e638d08
commit 985e4f70b5
34 changed files with 13334 additions and 52 deletions


@@ -0,0 +1,322 @@
/**************************************************************************
*
* Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/* Authors: Zack Rusin <zack@tungstengraphics.com>
*/
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "cso_cache.h"
#include "cso_hash.h"
struct cso_cache {
struct cso_hash *hashes[CSO_CACHE_MAX];
int max_size;
cso_sanitize_callback sanitize_cb;
void *sanitize_data;
};
#if 1
static unsigned hash_key(const void *key, unsigned key_size)
{
unsigned *ikey = (unsigned *)key;
unsigned hash = 0, i;
assert(key_size % 4 == 0);
/* I'm sure this can be improved on:
*/
for (i = 0; i < key_size/4; i++)
hash ^= ikey[i];
return hash;
}
#else
static unsigned hash_key(const unsigned char *p, int n)
{
unsigned h = 0;
unsigned g;
while (n--) {
h = (h << 4) + *p++;
if ((g = (h & 0xf0000000)) != 0)
h ^= g >> 23;
h &= ~g;
}
return h;
}
#endif
unsigned cso_construct_key(void *item, int item_size)
{
return hash_key((item), item_size);
}
static INLINE struct cso_hash *_cso_hash_for_type(struct cso_cache *sc, enum cso_cache_type type)
{
struct cso_hash *hash;
hash = sc->hashes[type];
return hash;
}
static void delete_blend_state(void *state, void *data)
{
struct cso_blend *cso = (struct cso_blend *)state;
if (cso->delete_state)
cso->delete_state(cso->context, cso->data);
FREE(state);
}
static void delete_depth_stencil_state(void *state, void *data)
{
struct cso_depth_stencil_alpha *cso = (struct cso_depth_stencil_alpha *)state;
if (cso->delete_state)
cso->delete_state(cso->context, cso->data);
FREE(state);
}
static void delete_sampler_state(void *state, void *data)
{
struct cso_sampler *cso = (struct cso_sampler *)state;
if (cso->delete_state)
cso->delete_state(cso->context, cso->data);
FREE(state);
}
static void delete_rasterizer_state(void *state, void *data)
{
struct cso_rasterizer *cso = (struct cso_rasterizer *)state;
if (cso->delete_state)
cso->delete_state(cso->context, cso->data);
FREE(state);
}
static void delete_velements(void *state, void *data)
{
struct cso_velements *cso = (struct cso_velements *)state;
if (cso->delete_state)
cso->delete_state(cso->context, cso->data);
FREE(state);
}
static INLINE void delete_cso(void *state, enum cso_cache_type type)
{
switch (type) {
case CSO_BLEND:
delete_blend_state(state, 0);
break;
case CSO_SAMPLER:
delete_sampler_state(state, 0);
break;
case CSO_DEPTH_STENCIL_ALPHA:
delete_depth_stencil_state(state, 0);
break;
case CSO_RASTERIZER:
delete_rasterizer_state(state, 0);
break;
case CSO_VELEMENTS:
delete_velements(state, 0);
break;
default:
assert(0);
FREE(state);
}
}
static INLINE void sanitize_hash(struct cso_cache *sc,
struct cso_hash *hash,
enum cso_cache_type type,
int max_size)
{
if (sc->sanitize_cb)
sc->sanitize_cb(hash, type, max_size, sc->sanitize_data);
}
static INLINE void sanitize_cb(struct cso_hash *hash, enum cso_cache_type type,
int max_size, void *user_data)
{
/* If we're approaching the maximum size, remove a fourth of the entries;
* otherwise every subsequent call would go through the same eviction. */
int hash_size = cso_hash_size(hash);
int max_entries = (max_size > hash_size) ? max_size : hash_size;
int to_remove = (max_size < max_entries) * max_entries/4;
if (hash_size > max_size)
to_remove += hash_size - max_size;
while (to_remove) {
/* remove elements until we're good */
/* FIXME: currently we pick the nodes to remove at random */
struct cso_hash_iter iter = cso_hash_first_node(hash);
void *cso = cso_hash_take(hash, cso_hash_iter_key(iter));
delete_cso(cso, type);
--to_remove;
}
}
struct cso_hash_iter
cso_insert_state(struct cso_cache *sc,
unsigned hash_key, enum cso_cache_type type,
void *state)
{
struct cso_hash *hash = _cso_hash_for_type(sc, type);
sanitize_hash(sc, hash, type, sc->max_size);
return cso_hash_insert(hash, hash_key, state);
}
struct cso_hash_iter
cso_find_state(struct cso_cache *sc,
unsigned hash_key, enum cso_cache_type type)
{
struct cso_hash *hash = _cso_hash_for_type(sc, type);
return cso_hash_find(hash, hash_key);
}
void *cso_hash_find_data_from_template( struct cso_hash *hash,
unsigned hash_key,
void *templ,
int size )
{
struct cso_hash_iter iter = cso_hash_find(hash, hash_key);
while (!cso_hash_iter_is_null(iter)) {
void *iter_data = cso_hash_iter_data(iter);
if (!memcmp(iter_data, templ, size)) {
/* We found a match
*/
return iter_data;
}
iter = cso_hash_iter_next(iter);
}
return NULL;
}
struct cso_hash_iter cso_find_state_template(struct cso_cache *sc,
unsigned hash_key, enum cso_cache_type type,
void *templ, unsigned size)
{
struct cso_hash_iter iter = cso_find_state(sc, hash_key, type);
while (!cso_hash_iter_is_null(iter)) {
void *iter_data = cso_hash_iter_data(iter);
if (!memcmp(iter_data, templ, size))
return iter;
iter = cso_hash_iter_next(iter);
}
return iter;
}
void * cso_take_state(struct cso_cache *sc,
unsigned hash_key, enum cso_cache_type type)
{
struct cso_hash *hash = _cso_hash_for_type(sc, type);
return cso_hash_take(hash, hash_key);
}
struct cso_cache *cso_cache_create(void)
{
struct cso_cache *sc = MALLOC_STRUCT(cso_cache);
int i;
if (sc == NULL)
return NULL;
sc->max_size = 4096;
for (i = 0; i < CSO_CACHE_MAX; i++)
sc->hashes[i] = cso_hash_create();
sc->sanitize_cb = sanitize_cb;
sc->sanitize_data = 0;
return sc;
}
void cso_for_each_state(struct cso_cache *sc, enum cso_cache_type type,
cso_state_callback func, void *user_data)
{
struct cso_hash *hash = _cso_hash_for_type(sc, type);
struct cso_hash_iter iter;
iter = cso_hash_first_node(hash);
while (!cso_hash_iter_is_null(iter)) {
void *state = cso_hash_iter_data(iter);
iter = cso_hash_iter_next(iter);
if (state) {
func(state, user_data);
}
}
}
void cso_cache_delete(struct cso_cache *sc)
{
int i;
assert(sc);
if (!sc)
return;
/* delete driver data */
cso_for_each_state(sc, CSO_BLEND, delete_blend_state, 0);
cso_for_each_state(sc, CSO_DEPTH_STENCIL_ALPHA, delete_depth_stencil_state, 0);
cso_for_each_state(sc, CSO_RASTERIZER, delete_rasterizer_state, 0);
cso_for_each_state(sc, CSO_SAMPLER, delete_sampler_state, 0);
cso_for_each_state(sc, CSO_VELEMENTS, delete_velements, 0);
for (i = 0; i < CSO_CACHE_MAX; i++)
cso_hash_delete(sc->hashes[i]);
FREE(sc);
}
void cso_set_maximum_cache_size(struct cso_cache *sc, int number)
{
int i;
sc->max_size = number;
for (i = 0; i < CSO_CACHE_MAX; i++)
sanitize_hash(sc, sc->hashes[i], i, sc->max_size);
}
int cso_maximum_cache_size(const struct cso_cache *sc)
{
return sc->max_size;
}
void cso_cache_set_sanitize_callback(struct cso_cache *sc,
cso_sanitize_callback cb,
void *user_data)
{
sc->sanitize_cb = cb;
sc->sanitize_data = user_data;
}


@@ -0,0 +1,175 @@
/**************************************************************************
*
* Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* @file
* Constant State Object (CSO) cache.
*
* The basic idea is that the states are created via the
* create_state/bind_state/delete_state semantics. The driver is expected to
* perform as much as possible of the translation from Gallium state to its
* internal representation during the create call. Gallium then has a caching
* mechanism where it stores the created states. When the pipeline needs an
* actual state change, a bind call is issued. In the bind call the driver
* gets its already translated representation.
*
* Those semantics mean that the driver doesn't do the repeated translations
* of states on every frame, but only once, when a new state is actually
* created.
*
* Even on hardware that doesn't do any kind of state cache, it makes the
* driver look a lot neater, plus it avoids all the redundant state
* translations on every frame.
*
* Currently our constant state objects are:
* - alpha test
* - blend
* - depth stencil
* - fragment shader
* - rasterizer (old setup)
* - sampler
* - vertex shader
* - vertex elements
*
* Things that are not constant state objects include:
* - blend_color
* - clip_state
* - clear_color_state
* - constant_buffer
* - feedback_state
* - framebuffer_state
* - polygon_stipple
* - scissor_state
* - texture_state
* - viewport_state
*
* @author Zack Rusin <zack@tungstengraphics.com>
*/
#ifndef CSO_CACHE_H
#define CSO_CACHE_H
#include "pipe/p_context.h"
#include "pipe/p_state.h"
/* cso_hash.h is necessary for cso_hash_iter, as MSVC requires structures
* returned by value to be fully defined */
#include "cso_hash.h"
#ifdef __cplusplus
extern "C" {
#endif
enum cso_cache_type {
CSO_RASTERIZER,
CSO_BLEND,
CSO_DEPTH_STENCIL_ALPHA,
CSO_SAMPLER,
CSO_VELEMENTS,
CSO_CACHE_MAX,
};
typedef void (*cso_state_callback)(void *ctx, void *obj);
typedef void (*cso_sanitize_callback)(struct cso_hash *hash,
enum cso_cache_type type,
int max_size,
void *user_data);
struct cso_cache;
struct cso_blend {
struct pipe_blend_state state;
void *data;
cso_state_callback delete_state;
struct pipe_context *context;
};
struct cso_depth_stencil_alpha {
struct pipe_depth_stencil_alpha_state state;
void *data;
cso_state_callback delete_state;
struct pipe_context *context;
};
struct cso_rasterizer {
struct pipe_rasterizer_state state;
void *data;
cso_state_callback delete_state;
struct pipe_context *context;
};
struct cso_sampler {
struct pipe_sampler_state state;
void *data;
cso_state_callback delete_state;
struct pipe_context *context;
};
struct cso_velems_state {
unsigned count;
struct pipe_vertex_element velems[PIPE_MAX_ATTRIBS];
};
struct cso_velements {
struct cso_velems_state state;
void *data;
cso_state_callback delete_state;
struct pipe_context *context;
};
unsigned cso_construct_key(void *item, int item_size);
struct cso_cache *cso_cache_create(void);
void cso_cache_delete(struct cso_cache *sc);
void cso_cache_set_sanitize_callback(struct cso_cache *sc,
cso_sanitize_callback cb,
void *user_data);
struct cso_hash_iter cso_insert_state(struct cso_cache *sc,
unsigned hash_key, enum cso_cache_type type,
void *state);
struct cso_hash_iter cso_find_state(struct cso_cache *sc,
unsigned hash_key, enum cso_cache_type type);
struct cso_hash_iter cso_find_state_template(struct cso_cache *sc,
unsigned hash_key, enum cso_cache_type type,
void *templ, unsigned size);
void cso_for_each_state(struct cso_cache *sc, enum cso_cache_type type,
cso_state_callback func, void *user_data);
void * cso_take_state(struct cso_cache *sc, unsigned hash_key,
enum cso_cache_type type);
void cso_set_maximum_cache_size(struct cso_cache *sc, int number);
int cso_maximum_cache_size(const struct cso_cache *sc);
#ifdef __cplusplus
}
#endif
#endif
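To make the create/bind caching semantics described above concrete, here is a minimal sketch of a lookup-or-create path for blend state. Only the API declared in this header (plus MALLOC from util/u_memory.h) is real; driver_translate_blend() and driver_delete_blend() are hypothetical driver hooks standing in for the driver's create/delete entry points.

/* Sketch only: look up a translated blend CSO by template, creating and
 * caching it on a miss. The memcmp inside cso_find_state_template works
 * because 'state' is the first member of struct cso_blend. */
static void *
blend_lookup_or_create(struct cso_cache *cache, struct pipe_context *pipe,
                       const struct pipe_blend_state *templ)
{
   unsigned key = cso_construct_key((void *)templ, sizeof(*templ));
   struct cso_hash_iter iter =
      cso_find_state_template(cache, key, CSO_BLEND,
                              (void *)templ, sizeof(*templ));
   struct cso_blend *cso;

   if (!cso_hash_iter_is_null(iter))
      return ((struct cso_blend *)cso_hash_iter_data(iter))->data;

   /* Miss: translate once, so later binds reuse the cached object. */
   cso = MALLOC(sizeof(struct cso_blend));
   cso->state = *templ;
   cso->data = driver_translate_blend(pipe, &cso->state); /* hypothetical */
   cso->delete_state = driver_delete_blend;               /* hypothetical */
   cso->context = pipe;
   cso_insert_state(cache, key, CSO_BLEND, cso);
   return cso->data;
}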

File diff suppressed because it is too large.


@@ -0,0 +1,239 @@
/**************************************************************************
*
* Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef CSO_CONTEXT_H
#define CSO_CONTEXT_H
#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#ifdef __cplusplus
extern "C" {
#endif
struct cso_context;
struct u_vbuf;
struct cso_context *cso_create_context( struct pipe_context *pipe );
void cso_release_all( struct cso_context *ctx );
void cso_destroy_context( struct cso_context *cso );
enum pipe_error cso_set_blend( struct cso_context *cso,
const struct pipe_blend_state *blend );
void cso_save_blend(struct cso_context *cso);
void cso_restore_blend(struct cso_context *cso);
enum pipe_error cso_set_depth_stencil_alpha( struct cso_context *cso,
const struct pipe_depth_stencil_alpha_state *dsa );
void cso_save_depth_stencil_alpha(struct cso_context *cso);
void cso_restore_depth_stencil_alpha(struct cso_context *cso);
enum pipe_error cso_set_rasterizer( struct cso_context *cso,
const struct pipe_rasterizer_state *rasterizer );
void cso_save_rasterizer(struct cso_context *cso);
void cso_restore_rasterizer(struct cso_context *cso);
enum pipe_error
cso_set_samplers(struct cso_context *cso,
unsigned shader_stage,
unsigned count,
const struct pipe_sampler_state **states);
void
cso_save_samplers(struct cso_context *cso, unsigned shader_stage);
void
cso_restore_samplers(struct cso_context *cso, unsigned shader_stage);
/* Alternate interface to support state trackers that like to modify
* samplers one at a time:
*/
enum pipe_error
cso_single_sampler(struct cso_context *cso,
unsigned shader_stage,
unsigned count,
const struct pipe_sampler_state *states);
void
cso_single_sampler_done(struct cso_context *cso, unsigned shader_stage);
enum pipe_error cso_set_vertex_elements(struct cso_context *ctx,
unsigned count,
const struct pipe_vertex_element *states);
void cso_save_vertex_elements(struct cso_context *ctx);
void cso_restore_vertex_elements(struct cso_context *ctx);
void cso_set_vertex_buffers(struct cso_context *ctx,
unsigned start_slot, unsigned count,
const struct pipe_vertex_buffer *buffers);
/* One vertex buffer slot is provided with the save/restore functionality.
* cso_context chooses the slot; it can be non-zero. */
void cso_save_aux_vertex_buffer_slot(struct cso_context *ctx);
void cso_restore_aux_vertex_buffer_slot(struct cso_context *ctx);
unsigned cso_get_aux_vertex_buffer_slot(struct cso_context *ctx);
void cso_set_stream_outputs(struct cso_context *ctx,
unsigned num_targets,
struct pipe_stream_output_target **targets,
unsigned append_bitmask);
void cso_save_stream_outputs(struct cso_context *ctx);
void cso_restore_stream_outputs(struct cso_context *ctx);
/*
* We don't provide shader caching in CSO. Most of the time the API provides
* object semantics for shaders anyway, and in the cases where it doesn't
* (e.g. Mesa's internally-generated texenv programs), it is up to
* the state tracker to implement its own specialized caching.
*/
void cso_set_fragment_shader_handle(struct cso_context *ctx, void *handle);
void cso_delete_fragment_shader(struct cso_context *ctx, void *handle );
void cso_save_fragment_shader(struct cso_context *cso);
void cso_restore_fragment_shader(struct cso_context *cso);
void cso_set_vertex_shader_handle(struct cso_context *ctx, void *handle);
void cso_delete_vertex_shader(struct cso_context *ctx, void *handle );
void cso_save_vertex_shader(struct cso_context *cso);
void cso_restore_vertex_shader(struct cso_context *cso);
void cso_set_geometry_shader_handle(struct cso_context *ctx, void *handle);
void cso_delete_geometry_shader(struct cso_context *ctx, void *handle);
void cso_save_geometry_shader(struct cso_context *cso);
void cso_restore_geometry_shader(struct cso_context *cso);
void cso_set_framebuffer(struct cso_context *cso,
const struct pipe_framebuffer_state *fb);
void cso_save_framebuffer(struct cso_context *cso);
void cso_restore_framebuffer(struct cso_context *cso);
void cso_set_viewport(struct cso_context *cso,
const struct pipe_viewport_state *vp);
void cso_save_viewport(struct cso_context *cso);
void cso_restore_viewport(struct cso_context *cso);
void cso_set_blend_color(struct cso_context *cso,
const struct pipe_blend_color *bc);
void cso_set_sample_mask(struct cso_context *cso, unsigned stencil_mask);
void cso_save_sample_mask(struct cso_context *ctx);
void cso_restore_sample_mask(struct cso_context *ctx);
void cso_set_stencil_ref(struct cso_context *cso,
const struct pipe_stencil_ref *sr);
void cso_save_stencil_ref(struct cso_context *cso);
void cso_restore_stencil_ref(struct cso_context *cso);
void cso_set_render_condition(struct cso_context *cso,
struct pipe_query *query,
boolean condition, uint mode);
void cso_save_render_condition(struct cso_context *cso);
void cso_restore_render_condition(struct cso_context *cso);
/* clip state */
void
cso_set_clip(struct cso_context *cso,
const struct pipe_clip_state *clip);
void
cso_save_clip(struct cso_context *cso);
void
cso_restore_clip(struct cso_context *cso);
/* sampler view state */
void
cso_set_sampler_views(struct cso_context *cso,
unsigned shader_stage,
unsigned count,
struct pipe_sampler_view **views);
void
cso_save_sampler_views(struct cso_context *cso, unsigned shader_stage);
void
cso_restore_sampler_views(struct cso_context *cso, unsigned shader_stage);
/* constant buffers */
void cso_set_constant_buffer(struct cso_context *cso, unsigned shader_stage,
unsigned index, struct pipe_constant_buffer *cb);
void cso_set_constant_buffer_resource(struct cso_context *cso,
unsigned shader_stage,
unsigned index,
struct pipe_resource *buffer);
void cso_save_constant_buffer_slot0(struct cso_context *cso,
unsigned shader_stage);
void cso_restore_constant_buffer_slot0(struct cso_context *cso,
unsigned shader_stage);
/* drawing */
void
cso_set_index_buffer(struct cso_context *cso,
const struct pipe_index_buffer *ib);
void
cso_draw_vbo(struct cso_context *cso,
const struct pipe_draw_info *info);
/* helper drawing function */
void
cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count);
#ifdef __cplusplus
}
#endif
#endif
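The save/set/restore triples above exist so that meta operations (blits, clears, mipmap generation) can temporarily override state and put everything back afterwards. A minimal sketch of that pattern, assuming the caller has already created the blit state objects and shader handles:

/* Sketch only: save what we clobber, set our own state, draw, restore. */
static void
blit_quad(struct cso_context *cso,
          const struct pipe_blend_state *blit_blend,
          const struct pipe_rasterizer_state *blit_raster,
          void *blit_fs, void *blit_vs)
{
   cso_save_blend(cso);
   cso_save_rasterizer(cso);
   cso_save_fragment_shader(cso);
   cso_save_vertex_shader(cso);

   cso_set_blend(cso, blit_blend);
   cso_set_rasterizer(cso, blit_raster);
   cso_set_fragment_shader_handle(cso, blit_fs);
   cso_set_vertex_shader_handle(cso, blit_vs);

   cso_draw_arrays(cso, PIPE_PRIM_TRIANGLE_FAN, 0, 4);

   cso_restore_blend(cso);
   cso_restore_rasterizer(cso);
   cso_restore_fragment_shader(cso);
   cso_restore_vertex_shader(cso);
}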


@@ -0,0 +1,439 @@
/**************************************************************************
*
* Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors:
* Zack Rusin <zack@tungstengraphics.com>
*/
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "cso_hash.h"
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
static const int MinNumBits = 4;
static const unsigned char prime_deltas[] = {
0, 0, 1, 3, 1, 5, 3, 3, 1, 9, 7, 5, 3, 9, 25, 3,
1, 21, 3, 21, 7, 15, 9, 5, 3, 29, 15, 0, 0, 0, 0, 0
};
static int primeForNumBits(int numBits)
{
return (1 << numBits) + prime_deltas[numBits];
}
/*
Returns the smallest integer n such that
primeForNumBits(n) >= hint.
*/
static int countBits(int hint)
{
int numBits = 0;
int bits = hint;
while (bits > 1) {
bits >>= 1;
numBits++;
}
if (numBits >= (int)sizeof(prime_deltas)) {
numBits = sizeof(prime_deltas) - 1;
} else if (primeForNumBits(numBits) < hint) {
++numBits;
}
return numBits;
}
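/* Worked example: countBits(100) shifts 100 down to 1 in six steps, but
 * primeForNumBits(6) == 64 + 3 == 67 < 100, so the result is bumped to 7,
 * giving primeForNumBits(7) == 128 + 3 == 131 >= 100 buckets. */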
struct cso_node {
struct cso_node *next;
unsigned key;
void *value;
};
struct cso_hash_data {
struct cso_node *fakeNext;
struct cso_node **buckets;
int size;
int nodeSize;
short userNumBits;
short numBits;
int numBuckets;
};
struct cso_hash {
union {
struct cso_hash_data *d;
struct cso_node *e;
} data;
};
static void *cso_data_allocate_node(struct cso_hash_data *hash)
{
return MALLOC(hash->nodeSize);
}
static void cso_free_node(struct cso_node *node)
{
FREE(node);
}
static struct cso_node *
cso_hash_create_node(struct cso_hash *hash,
unsigned akey, void *avalue,
struct cso_node **anextNode)
{
struct cso_node *node = cso_data_allocate_node(hash->data.d);
if (!node)
return NULL;
node->key = akey;
node->value = avalue;
node->next = (struct cso_node*)(*anextNode);
*anextNode = node;
++hash->data.d->size;
return node;
}
static void cso_data_rehash(struct cso_hash_data *hash, int hint)
{
if (hint < 0) {
hint = countBits(-hint);
if (hint < MinNumBits)
hint = MinNumBits;
hash->userNumBits = (short)hint;
while (primeForNumBits(hint) < (hash->size >> 1))
++hint;
} else if (hint < MinNumBits) {
hint = MinNumBits;
}
if (hash->numBits != hint) {
struct cso_node *e = (struct cso_node *)(hash);
struct cso_node **oldBuckets = hash->buckets;
int oldNumBuckets = hash->numBuckets;
int i = 0;
hash->numBits = (short)hint;
hash->numBuckets = primeForNumBits(hint);
hash->buckets = MALLOC(sizeof(struct cso_node*) * hash->numBuckets);
for (i = 0; i < hash->numBuckets; ++i)
hash->buckets[i] = e;
for (i = 0; i < oldNumBuckets; ++i) {
struct cso_node *firstNode = oldBuckets[i];
while (firstNode != e) {
unsigned h = firstNode->key;
struct cso_node *lastNode = firstNode;
struct cso_node *afterLastNode;
struct cso_node **beforeFirstNode;
while (lastNode->next != e && lastNode->next->key == h)
lastNode = lastNode->next;
afterLastNode = lastNode->next;
beforeFirstNode = &hash->buckets[h % hash->numBuckets];
while (*beforeFirstNode != e)
beforeFirstNode = &(*beforeFirstNode)->next;
lastNode->next = *beforeFirstNode;
*beforeFirstNode = firstNode;
firstNode = afterLastNode;
}
}
FREE(oldBuckets);
}
}
static void cso_data_might_grow(struct cso_hash_data *hash)
{
if (hash->size >= hash->numBuckets)
cso_data_rehash(hash, hash->numBits + 1);
}
static void cso_data_has_shrunk(struct cso_hash_data *hash)
{
if (hash->size <= (hash->numBuckets >> 3) &&
hash->numBits > hash->userNumBits) {
int max = MAX(hash->numBits-2, hash->userNumBits);
cso_data_rehash(hash, max);
}
}
static struct cso_node *cso_data_first_node(struct cso_hash_data *hash)
{
struct cso_node *e = (struct cso_node *)(hash);
struct cso_node **bucket = hash->buckets;
int n = hash->numBuckets;
while (n--) {
if (*bucket != e)
return *bucket;
++bucket;
}
return e;
}
static struct cso_node **cso_hash_find_node(struct cso_hash *hash, unsigned akey)
{
struct cso_node **node;
if (hash->data.d->numBuckets) {
node = (struct cso_node **)(&hash->data.d->buckets[akey % hash->data.d->numBuckets]);
assert(*node == hash->data.e || (*node)->next);
while (*node != hash->data.e && (*node)->key != akey)
node = &(*node)->next;
} else {
node = (struct cso_node **)((const struct cso_node * const *)(&hash->data.e));
}
return node;
}
struct cso_hash_iter cso_hash_insert(struct cso_hash *hash,
unsigned key, void *data)
{
cso_data_might_grow(hash->data.d);
{
struct cso_node **nextNode = cso_hash_find_node(hash, key);
struct cso_node *node = cso_hash_create_node(hash, key, data, nextNode);
if (!node) {
struct cso_hash_iter null_iter = {hash, 0};
return null_iter;
}
{
struct cso_hash_iter iter = {hash, node};
return iter;
}
}
}
struct cso_hash * cso_hash_create(void)
{
struct cso_hash *hash = MALLOC_STRUCT(cso_hash);
if (!hash)
return NULL;
hash->data.d = MALLOC_STRUCT(cso_hash_data);
if (!hash->data.d) {
FREE(hash);
return NULL;
}
hash->data.d->fakeNext = 0;
hash->data.d->buckets = 0;
hash->data.d->size = 0;
hash->data.d->nodeSize = sizeof(struct cso_node);
hash->data.d->userNumBits = (short)MinNumBits;
hash->data.d->numBits = 0;
hash->data.d->numBuckets = 0;
return hash;
}
void cso_hash_delete(struct cso_hash *hash)
{
struct cso_node *e_for_x = (struct cso_node *)(hash->data.d);
struct cso_node **bucket = (struct cso_node **)(hash->data.d->buckets);
int n = hash->data.d->numBuckets;
while (n--) {
struct cso_node *cur = *bucket++;
while (cur != e_for_x) {
struct cso_node *next = cur->next;
cso_free_node(cur);
cur = next;
}
}
FREE(hash->data.d->buckets);
FREE(hash->data.d);
FREE(hash);
}
struct cso_hash_iter cso_hash_find(struct cso_hash *hash,
unsigned key)
{
struct cso_node **nextNode = cso_hash_find_node(hash, key);
struct cso_hash_iter iter = {hash, *nextNode};
return iter;
}
unsigned cso_hash_iter_key(struct cso_hash_iter iter)
{
if (!iter.node || iter.hash->data.e == iter.node)
return 0;
return iter.node->key;
}
void * cso_hash_iter_data(struct cso_hash_iter iter)
{
if (!iter.node || iter.hash->data.e == iter.node)
return 0;
return iter.node->value;
}
static struct cso_node *cso_hash_data_next(struct cso_node *node)
{
union {
struct cso_node *next;
struct cso_node *e;
struct cso_hash_data *d;
} a;
int start;
struct cso_node **bucket;
int n;
a.next = node->next;
if (!a.next) {
debug_printf("iterating beyond the last element\n");
return 0;
}
if (a.next->next)
return a.next;
start = (node->key % a.d->numBuckets) + 1;
bucket = a.d->buckets + start;
n = a.d->numBuckets - start;
while (n--) {
if (*bucket != a.e)
return *bucket;
++bucket;
}
return a.e;
}
static struct cso_node *cso_hash_data_prev(struct cso_node *node)
{
union {
struct cso_node *e;
struct cso_hash_data *d;
} a;
int start;
struct cso_node *sentinel;
struct cso_node **bucket;
a.e = node;
while (a.e->next)
a.e = a.e->next;
if (node == a.e)
start = a.d->numBuckets - 1;
else
start = node->key % a.d->numBuckets;
sentinel = node;
bucket = a.d->buckets + start;
while (start >= 0) {
if (*bucket != sentinel) {
struct cso_node *prev = *bucket;
while (prev->next != sentinel)
prev = prev->next;
return prev;
}
sentinel = a.e;
--bucket;
--start;
}
debug_printf("iterating backward beyond first element\n");
return a.e;
}
struct cso_hash_iter cso_hash_iter_next(struct cso_hash_iter iter)
{
struct cso_hash_iter next = {iter.hash, cso_hash_data_next(iter.node)};
return next;
}
int cso_hash_iter_is_null(struct cso_hash_iter iter)
{
if (!iter.node || iter.node == iter.hash->data.e)
return 1;
return 0;
}
void * cso_hash_take(struct cso_hash *hash,
unsigned akey)
{
struct cso_node **node = cso_hash_find_node(hash, akey);
if (*node != hash->data.e) {
void *t = (*node)->value;
struct cso_node *next = (*node)->next;
cso_free_node(*node);
*node = next;
--hash->data.d->size;
cso_data_has_shrunk(hash->data.d);
return t;
}
return 0;
}
struct cso_hash_iter cso_hash_iter_prev(struct cso_hash_iter iter)
{
struct cso_hash_iter prev = {iter.hash,
cso_hash_data_prev(iter.node)};
return prev;
}
struct cso_hash_iter cso_hash_first_node(struct cso_hash *hash)
{
struct cso_hash_iter iter = {hash, cso_data_first_node(hash->data.d)};
return iter;
}
int cso_hash_size(struct cso_hash *hash)
{
return hash->data.d->size;
}
struct cso_hash_iter cso_hash_erase(struct cso_hash *hash, struct cso_hash_iter iter)
{
struct cso_hash_iter ret = iter;
struct cso_node *node = iter.node;
struct cso_node **node_ptr;
if (node == hash->data.e)
return iter;
ret = cso_hash_iter_next(ret);
node_ptr = (struct cso_node**)(&hash->data.d->buckets[node->key % hash->data.d->numBuckets]);
while (*node_ptr != node)
node_ptr = &(*node_ptr)->next;
*node_ptr = node->next;
cso_free_node(node);
--hash->data.d->size;
return ret;
}
boolean cso_hash_contains(struct cso_hash *hash, unsigned key)
{
struct cso_node **node = cso_hash_find_node(hash, key);
return (*node != hash->data.e);
}


@@ -0,0 +1,129 @@
/**************************************************************************
*
* Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* @file
* Hash table implementation.
*
* This file provides a hash implementation that is capable of dealing
* with collisions. It stores colliding entries in a linked list. All
* functions operating on the hash return an iterator. The iterator
* itself points into the collision list. If there wasn't any collision
* the list will have just one entry; otherwise client code should
* iterate over the entries to find the exact entry among the ones that
* had the same key (e.g. memcmp could be used on the data to check
* that).
*
* @author Zack Rusin <zack@tungstengraphics.com>
*/
#ifndef CSO_HASH_H
#define CSO_HASH_H
#include "pipe/p_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
struct cso_hash;
struct cso_node;
struct cso_hash_iter {
struct cso_hash *hash;
struct cso_node *node;
};
struct cso_hash *cso_hash_create(void);
void cso_hash_delete(struct cso_hash *hash);
int cso_hash_size(struct cso_hash *hash);
/**
* Adds data with the given key to the hash. If an entry with the given
* key is already in the hash, the new entry is inserted before it
* in the collision list.
* Function returns iterator pointing to the inserted item in the hash.
*/
struct cso_hash_iter cso_hash_insert(struct cso_hash *hash, unsigned key,
void *data);
/**
* Removes the item pointed to by the current iterator from the hash.
* Note that the data itself is not freed; if it was a malloc'ed pointer,
* the caller will have to free it after calling this function.
* Function returns iterator pointing to the item after the removed one in
* the hash.
*/
struct cso_hash_iter cso_hash_erase(struct cso_hash *hash, struct cso_hash_iter iter);
void *cso_hash_take(struct cso_hash *hash, unsigned key);
struct cso_hash_iter cso_hash_first_node(struct cso_hash *hash);
/**
* Return an iterator pointing to the first entry in the collision list.
*/
struct cso_hash_iter cso_hash_find(struct cso_hash *hash, unsigned key);
/**
* Returns true if a value with the given key exists in the hash
*/
boolean cso_hash_contains(struct cso_hash *hash, unsigned key);
int cso_hash_iter_is_null(struct cso_hash_iter iter);
unsigned cso_hash_iter_key(struct cso_hash_iter iter);
void *cso_hash_iter_data(struct cso_hash_iter iter);
struct cso_hash_iter cso_hash_iter_next(struct cso_hash_iter iter);
struct cso_hash_iter cso_hash_iter_prev(struct cso_hash_iter iter);
/**
* Convenience routine that iterates over the collision list while doing a
* memory comparison to see which entry in the list is a direct copy of our
* template, and returns that entry.
*/
void *cso_hash_find_data_from_template( struct cso_hash *hash,
unsigned hash_key,
void *templ,
int size );
#ifdef __cplusplus
}
#endif
#endif
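Since cso_hash_find only returns the head of the collision list, callers that can see duplicate keys disambiguate by comparing payloads. A small self-contained sketch; the payload struct is made up for illustration, and assert is assumed from <assert.h>:

/* Sketch only: two payloads deliberately inserted under the same key,
 * then resolved with the memcmp walk in cso_hash_find_data_from_template. */
struct payload { int a, b; };

static void
collision_demo(void)
{
   struct cso_hash *hash = cso_hash_create();
   struct payload p1 = { 1, 2 };
   struct payload p2 = { 3, 4 };
   struct payload templ = { 3, 4 };
   struct payload *found;

   cso_hash_insert(hash, 42, &p1);
   cso_hash_insert(hash, 42, &p2);   /* same key: collision */

   found = cso_hash_find_data_from_template(hash, 42, &templ, sizeof(templ));
   assert(found == &p2);             /* newest entry sits first in the list */

   cso_hash_delete(hash);            /* frees the nodes, not the payloads */
}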


@@ -35,14 +35,8 @@
 #include "pipe/p_config.h"
-#if defined(PIPE_OS_UNIX)
 # include <time.h> /* timeval */
 # include <sys/time.h> /* timeval */
-#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
-# include <windows.h>
-#else
-# error Unsupported OS
-#endif
 #include "os_time.h"
@@ -50,32 +44,8 @@
 int64_t
 os_time_get_nano(void)
 {
-#if defined(PIPE_OS_LINUX)
-   struct timespec tv;
-   clock_gettime(CLOCK_MONOTONIC, &tv);
-   return tv.tv_nsec + tv.tv_sec*INT64_C(1000000000);
-#elif defined(PIPE_OS_UNIX)
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_usec*INT64_C(1000) + tv.tv_sec*INT64_C(1000000000);
-#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
-   static LARGE_INTEGER frequency;
-   LARGE_INTEGER counter;
-   if(!frequency.QuadPart)
-      QueryPerformanceFrequency(&frequency);
-   QueryPerformanceCounter(&counter);
-   return counter.QuadPart*INT64_C(1000000000)/frequency.QuadPart;
-#else
-#error Unsupported OS
-#endif
 }


@@ -0,0 +1,288 @@
/**************************************************************************
*
* Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* \file
* Generic code for buffers.
*
* Behind a pipe buffer handle there can be DMA buffers, client (or user)
* buffers, regular malloced buffers, etc. This file provides an abstract base
* buffer handle that allows the driver to cope with all those kinds of buffers
* in a more flexible way.
*
* There is no obligation for a winsys driver to use this library. And a pipe
* driver should be completely agnostic about it.
*
* \author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
#ifndef PB_BUFFER_H_
#define PB_BUFFER_H_
#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "pipe/p_defines.h"
#ifdef __cplusplus
extern "C" {
#endif
struct pb_vtbl;
struct pb_validate;
struct pipe_fence_handle;
#define PB_USAGE_CPU_READ (1 << 0)
#define PB_USAGE_CPU_WRITE (1 << 1)
#define PB_USAGE_GPU_READ (1 << 2)
#define PB_USAGE_GPU_WRITE (1 << 3)
#define PB_USAGE_UNSYNCHRONIZED (1 << 10)
#define PB_USAGE_DONTBLOCK (1 << 9)
#define PB_USAGE_CPU_READ_WRITE \
( PB_USAGE_CPU_READ | PB_USAGE_CPU_WRITE )
#define PB_USAGE_GPU_READ_WRITE \
( PB_USAGE_GPU_READ | PB_USAGE_GPU_WRITE )
#define PB_USAGE_WRITE \
( PB_USAGE_CPU_WRITE | PB_USAGE_GPU_WRITE )
/**
* Buffer description.
*
* Used when allocating the buffer.
*/
struct pb_desc
{
unsigned alignment;
unsigned usage;
};
/**
* Size. Regular (32bit) unsigned for now.
*/
typedef unsigned pb_size;
/**
* Base class for all pb_* buffers.
*/
struct pb_buffer
{
struct pipe_reference reference;
unsigned size;
unsigned alignment;
unsigned usage;
/**
* Pointer to the virtual function table.
*
* Avoid accessing this table directly. Use the inline functions below
* instead to avoid mistakes.
*/
const struct pb_vtbl *vtbl;
};
/**
* Virtual function table for the buffer storage operations.
*
* Note that creation is not done through this table.
*/
struct pb_vtbl
{
void (*destroy)( struct pb_buffer *buf );
/**
* Map the entire data store of a buffer object into the client's address
* space. flags is a bitmask of PB_USAGE_CPU_READ/WRITE.
*/
void *(*map)( struct pb_buffer *buf,
unsigned flags, void *flush_ctx );
void (*unmap)( struct pb_buffer *buf );
enum pipe_error (*validate)( struct pb_buffer *buf,
struct pb_validate *vl,
unsigned flags );
void (*fence)( struct pb_buffer *buf,
struct pipe_fence_handle *fence );
/**
* Get the base buffer and the offset.
*
* A buffer can be subdivided into smaller buffers. This method should return
* the underlying buffer, and the relative offset.
*
* Buffers without an underlying base buffer should return themselves, with
* a zero offset.
*
* Note that this will increase the reference count of the base buffer.
*/
void (*get_base_buffer)( struct pb_buffer *buf,
struct pb_buffer **base_buf,
pb_size *offset );
};
/* Accessor functions for pb->vtbl:
*/
static INLINE void *
pb_map(struct pb_buffer *buf,
unsigned flags, void *flush_ctx)
{
assert(buf);
if(!buf)
return NULL;
assert(pipe_is_referenced(&buf->reference));
return buf->vtbl->map(buf, flags, flush_ctx);
}
static INLINE void
pb_unmap(struct pb_buffer *buf)
{
assert(buf);
if(!buf)
return;
assert(pipe_is_referenced(&buf->reference));
buf->vtbl->unmap(buf);
}
static INLINE void
pb_get_base_buffer( struct pb_buffer *buf,
struct pb_buffer **base_buf,
pb_size *offset )
{
assert(buf);
if(!buf) {
*base_buf = NULL;
*offset = 0;
return;
}
assert(pipe_is_referenced(&buf->reference));
assert(buf->vtbl->get_base_buffer);
buf->vtbl->get_base_buffer(buf, base_buf, offset);
assert(*base_buf);
assert(*offset < (*base_buf)->size);
}
static INLINE enum pipe_error
pb_validate(struct pb_buffer *buf, struct pb_validate *vl, unsigned flags)
{
assert(buf);
if(!buf)
return PIPE_ERROR;
assert(buf->vtbl->validate);
return buf->vtbl->validate(buf, vl, flags);
}
static INLINE void
pb_fence(struct pb_buffer *buf, struct pipe_fence_handle *fence)
{
assert(buf);
if(!buf)
return;
assert(buf->vtbl->fence);
buf->vtbl->fence(buf, fence);
}
static INLINE void
pb_destroy(struct pb_buffer *buf)
{
assert(buf);
if(!buf)
return;
assert(!pipe_is_referenced(&buf->reference));
buf->vtbl->destroy(buf);
}
static INLINE void
pb_reference(struct pb_buffer **dst,
struct pb_buffer *src)
{
struct pb_buffer *old = *dst;
if (pipe_reference(&(*dst)->reference, &src->reference))
pb_destroy( old );
*dst = src;
}
/**
* Utility function to check whether the provided alignment is consistent with
* the requested one or not.
*/
static INLINE boolean
pb_check_alignment(pb_size requested, pb_size provided)
{
if(!requested)
return TRUE;
if(requested > provided)
return FALSE;
if(provided % requested != 0)
return FALSE;
return TRUE;
}
/**
* Utility function to check whether the provided usage flags are consistent
* with the requested ones or not.
*/
static INLINE boolean
pb_check_usage(unsigned requested, unsigned provided)
{
return (requested & provided) == requested ? TRUE : FALSE;
}
/**
* Malloc-based buffer to store data that can't be used by the graphics
* hardware.
*/
struct pb_buffer *
pb_malloc_buffer_create(pb_size size,
const struct pb_desc *desc);
#ifdef __cplusplus
}
#endif
#endif /*PB_BUFFER_H_*/
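As a concrete illustration of the vtbl indirection, here is a minimal sketch that allocates a malloc-backed buffer through pb_malloc_buffer_create (declared above), maps it for CPU writes, and then drops the reference. memset is assumed from <string.h>; everything else is this header's API.

/* Sketch only: allocate, map, write, unmap, unreference. */
static void
pb_buffer_demo(void)
{
   struct pb_desc desc;
   struct pb_buffer *buf;
   void *map;

   desc.alignment = 64;
   desc.usage = PB_USAGE_CPU_READ_WRITE;

   buf = pb_malloc_buffer_create(4096, &desc);
   if (!buf)
      return;

   /* pb_map/pb_unmap dispatch through buf->vtbl behind the scenes. */
   map = pb_map(buf, PB_USAGE_CPU_WRITE, NULL);
   if (map) {
      memset(map, 0, 4096);
      pb_unmap(buf);
   }

   /* Releasing the reference destroys the buffer once the count hits zero. */
   pb_reference(&buf, NULL);
}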

File diff suppressed because it is too large.


@@ -0,0 +1,104 @@
/**************************************************************************
*
* Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* \file
* Buffer fencing.
*
* "Fenced buffers" is actually a misnomer. They should be referred as
* "fenceable buffers", i.e, buffers that can be fenced, but I couldn't find
* the word "fenceable" in the dictionary.
*
* A "fenced buffer" is a decorator around a normal buffer, which adds two
* special properties:
* - the ability for the destruction to be delayed by a fence;
* - reference counting.
*
* Usually DMA buffers have a life-time that extends beyond the life-time of
* their handle. The end-of-life is dictated by the fence signalling.
*
* Between the handle's destruction and the fence signalling, the buffer is
* stored in a fenced buffer list.
*
* \author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
#ifndef PB_BUFFER_FENCED_H_
#define PB_BUFFER_FENCED_H_
#include "util/u_debug.h"
#ifdef __cplusplus
extern "C" {
#endif
struct pipe_fence_handle;
/**
* List of buffers which are awaiting fence signalling.
*/
struct fenced_buffer_list;
struct pb_fence_ops
{
void (*destroy)( struct pb_fence_ops *ops );
/** Set ptr = fence, with reference counting */
void (*fence_reference)( struct pb_fence_ops *ops,
struct pipe_fence_handle **ptr,
struct pipe_fence_handle *fence );
/**
* Checks whether the fence has been signalled.
* \param flag driver-specific meaning
* \return zero on success.
*/
int (*fence_signalled)( struct pb_fence_ops *ops,
struct pipe_fence_handle *fence,
unsigned flag );
/**
* Wait for the fence to finish.
* \param flag driver-specific meaning
* \return zero on success.
*/
int (*fence_finish)( struct pb_fence_ops *ops,
struct pipe_fence_handle *fence,
unsigned flag );
};
#ifdef __cplusplus
}
#endif
#endif /*PB_BUFFER_FENCED_H_*/
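For a winsys without real GPU fences (softpipe is the obvious case), a degenerate pb_fence_ops implementation where every fence counts as already signalled satisfies this interface. A sketch, assuming CALLOC_STRUCT and FREE from util/u_memory.h; the noop_* functions are illustrative, not part of the real tree.

/* Sketch only: fence ops for a driver whose work completes synchronously. */
static void
noop_fence_reference(struct pb_fence_ops *ops,
                     struct pipe_fence_handle **ptr,
                     struct pipe_fence_handle *fence)
{
   *ptr = fence;   /* no real fence object to reference-count */
}

static int
noop_fence_signalled(struct pb_fence_ops *ops,
                     struct pipe_fence_handle *fence,
                     unsigned flag)
{
   return 0;       /* zero means signalled */
}

static int
noop_fence_finish(struct pb_fence_ops *ops,
                  struct pipe_fence_handle *fence,
                  unsigned flag)
{
   return 0;       /* nothing to wait for */
}

static void
noop_fence_ops_destroy(struct pb_fence_ops *ops)
{
   FREE(ops);
}

static struct pb_fence_ops *
noop_fence_ops_create(void)
{
   struct pb_fence_ops *ops = CALLOC_STRUCT(pb_fence_ops);
   if (!ops)
      return NULL;
   ops->destroy = noop_fence_ops_destroy;
   ops->fence_reference = noop_fence_reference;
   ops->fence_signalled = noop_fence_signalled;
   ops->fence_finish = noop_fence_finish;
   return ops;
}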


@@ -0,0 +1,198 @@
/**************************************************************************
*
* Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* \file
* Implementation of malloc-based buffers to store data that can't be processed
* by the hardware.
*
* \author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "pb_buffer.h"
#include "pb_bufmgr.h"
struct malloc_buffer
{
struct pb_buffer base;
void *data;
};
extern const struct pb_vtbl malloc_buffer_vtbl;
static INLINE struct malloc_buffer *
malloc_buffer(struct pb_buffer *buf)
{
assert(buf);
if (!buf)
return NULL;
assert(buf->vtbl == &malloc_buffer_vtbl);
return (struct malloc_buffer *)buf;
}
static void
malloc_buffer_destroy(struct pb_buffer *buf)
{
align_free(malloc_buffer(buf)->data);
FREE(buf);
}
static void *
malloc_buffer_map(struct pb_buffer *buf,
unsigned flags,
void *flush_ctx)
{
return malloc_buffer(buf)->data;
}
static void
malloc_buffer_unmap(struct pb_buffer *buf)
{
/* No-op */
}
static enum pipe_error
malloc_buffer_validate(struct pb_buffer *buf,
struct pb_validate *vl,
unsigned flags)
{
assert(0);
return PIPE_ERROR;
}
static void
malloc_buffer_fence(struct pb_buffer *buf,
struct pipe_fence_handle *fence)
{
assert(0);
}
static void
malloc_buffer_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
pb_size *offset)
{
*base_buf = buf;
*offset = 0;
}
const struct pb_vtbl
malloc_buffer_vtbl = {
malloc_buffer_destroy,
malloc_buffer_map,
malloc_buffer_unmap,
malloc_buffer_validate,
malloc_buffer_fence,
malloc_buffer_get_base_buffer
};
struct pb_buffer *
pb_malloc_buffer_create(pb_size size,
const struct pb_desc *desc)
{
struct malloc_buffer *buf;
/* TODO: do a single allocation */
buf = CALLOC_STRUCT(malloc_buffer);
if(!buf)
return NULL;
pipe_reference_init(&buf->base.reference, 1);
buf->base.usage = desc->usage;
buf->base.size = size;
buf->base.alignment = desc->alignment;
buf->base.vtbl = &malloc_buffer_vtbl;
buf->data = align_malloc(size, desc->alignment < sizeof(void*) ? sizeof(void*) : desc->alignment);
if(!buf->data) {
FREE(buf);
return NULL;
}
return &buf->base;
}
static struct pb_buffer *
pb_malloc_bufmgr_create_buffer(struct pb_manager *mgr,
pb_size size,
const struct pb_desc *desc)
{
return pb_malloc_buffer_create(size, desc);
}
static void
pb_malloc_bufmgr_flush(struct pb_manager *mgr)
{
/* No-op */
}
static void
pb_malloc_bufmgr_destroy(struct pb_manager *mgr)
{
/* No-op */
}
static boolean
pb_malloc_bufmgr_is_buffer_busy( struct pb_manager *mgr,
struct pb_buffer *buf )
{
return FALSE;
}
static struct pb_manager
pb_malloc_bufmgr = {
pb_malloc_bufmgr_destroy,
pb_malloc_bufmgr_create_buffer,
pb_malloc_bufmgr_flush,
pb_malloc_bufmgr_is_buffer_busy
};
struct pb_manager *
pb_malloc_bufmgr_create(void)
{
return &pb_malloc_bufmgr;
}


@@ -0,0 +1,218 @@
/**************************************************************************
*
* Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* \file
* Buffer management.
*
* A buffer manager does only one basic thing: it creates buffers. Actually,
* "buffer factory" would probably a more accurate description.
*
* You can chain buffer managers so that you can have a finer grained memory
* management and pooling.
*
* For example, for a simple batch buffer manager you would chain:
* - the native buffer manager, which provides DMA memory from the graphics
* memory space;
* - the pool buffer manager, which keeps around a pool of equally sized
* buffers to avoid the latency associated with the native buffer manager;
* - the fenced buffer manager, which delays buffer destruction until
* the moment the card finishes processing it.
*
* \author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
#ifndef PB_BUFMGR_H_
#define PB_BUFMGR_H_
#include "pb_buffer.h"
#ifdef __cplusplus
extern "C" {
#endif
struct pb_desc;
/**
* Abstract base class for all buffer managers.
*/
struct pb_manager
{
void
(*destroy)( struct pb_manager *mgr );
struct pb_buffer *
(*create_buffer)( struct pb_manager *mgr,
pb_size size,
const struct pb_desc *desc);
/**
* Flush all temporarily held buffers.
*
* Used mostly to aid debugging memory issues or to clean up resources when
* the drivers are long lived.
*/
void
(*flush)( struct pb_manager *mgr );
boolean
(*is_buffer_busy)( struct pb_manager *mgr,
struct pb_buffer *buf );
};
/**
* Malloc buffer provider.
*
* Simple wrapper around pb_malloc_buffer_create for convenience.
*/
struct pb_manager *
pb_malloc_bufmgr_create(void);
/**
* Static buffer pool sub-allocator.
*
* Manages the allocation of equally sized buffers. It does so by allocating
* a single big buffer and dividing it into equally sized sub-buffers.
*
* It is meant to manage the allocation of batch buffer pools.
*/
struct pb_manager *
pool_bufmgr_create(struct pb_manager *provider,
pb_size n, pb_size size,
const struct pb_desc *desc);
/**
* Static sub-allocator based on the old memory manager.
*
* It manages buffers of different sizes. It does so by allocating a buffer
* with the size of the heap, and then using the old mm memory manager to manage
* that heap.
*/
struct pb_manager *
mm_bufmgr_create(struct pb_manager *provider,
pb_size size, pb_size align2);
/**
* Same as mm_bufmgr_create.
*
* The buffer will be released when the manager is destroyed.
*/
struct pb_manager *
mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
pb_size size, pb_size align2);
/**
* Slab sub-allocator.
*/
struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
pb_size bufSize,
pb_size slabSize,
const struct pb_desc *desc);
/**
* Allows a range of buffer sizes by aggregating multiple slab sub-allocators
* with different bucket sizes.
*/
struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
pb_size minBufSize,
pb_size maxBufSize,
pb_size slabSize,
const struct pb_desc *desc);
/**
* Time-based buffer cache.
*
* This manager keeps a cache of destroyed buffers during a time interval.
*/
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
unsigned usecs);
struct pb_fence_ops;
/**
* Fenced buffer manager.
*
* This manager is just meant for convenience. It wraps the buffers returned
* by another manager in fenced buffers, so that their destruction can be
* delayed until the associated fence is signalled.
*
* NOTE: the buffer manager that provides the buffers will be destroyed
* at the same time.
*/
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
struct pb_fence_ops *ops,
pb_size max_buffer_size,
pb_size max_cpu_total_size);
struct pb_manager *
pb_alt_manager_create(struct pb_manager *provider1,
struct pb_manager *provider2);
/**
* Ondemand buffer manager.
*
* Buffers are created in malloc'ed memory (fast and cached), and the contents
* are transferred to a buffer from the provider (typically in slow uncached
* memory) when there is an attempt to validate the buffer.
*
* Ideal for situations where one does not know beforehand whether a given
* buffer will effectively be used by the hardware or not.
*/
struct pb_manager *
pb_ondemand_manager_create(struct pb_manager *provider);
/**
* Debug buffer manager to detect buffer under- and overflows.
*
* Under/overflow sizes should be a multiple of the largest alignment.
*/
struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
pb_size underflow_size, pb_size overflow_size);
#ifdef __cplusplus
}
#endif
#endif /*PB_BUFMGR_H_*/
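Putting the chaining example from the top of this header into code: a sketch that stacks the time-based cache and a slab range sub-allocator on top of the malloc provider. All constructors are declared above; the sizes are arbitrary.

/* Sketch only: malloc provider -> 1 second cache -> slab sub-allocator
 * handing out buffers between 64 bytes and 64 KB from 1 MB slabs. */
static struct pb_manager *
build_bufmgr_chain(void)
{
   struct pb_desc desc;
   struct pb_manager *provider, *cached;

   desc.alignment = 64;
   desc.usage = PB_USAGE_CPU_READ_WRITE;

   provider = pb_malloc_bufmgr_create();
   if (!provider)
      return NULL;

   cached = pb_cache_manager_create(provider, 1000000 /* usecs */);
   if (!cached)
      return NULL;

   return pb_slab_range_manager_create(cached, 64, 64 * 1024,
                                       1024 * 1024, &desc);
}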


@@ -0,0 +1,120 @@
/**************************************************************************
*
* Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* \file
* Allocate buffers from two alternative buffer providers.
*
* \author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "pb_buffer.h"
#include "pb_bufmgr.h"
struct pb_alt_manager
{
struct pb_manager base;
struct pb_manager *provider1;
struct pb_manager *provider2;
};
static INLINE struct pb_alt_manager *
pb_alt_manager(struct pb_manager *mgr)
{
assert(mgr);
return (struct pb_alt_manager *)mgr;
}
static struct pb_buffer *
pb_alt_manager_create_buffer(struct pb_manager *_mgr,
pb_size size,
const struct pb_desc *desc)
{
struct pb_alt_manager *mgr = pb_alt_manager(_mgr);
struct pb_buffer *buf;
buf = mgr->provider1->create_buffer(mgr->provider1, size, desc);
if(buf)
return buf;
buf = mgr->provider2->create_buffer(mgr->provider2, size, desc);
return buf;
}
static void
pb_alt_manager_flush(struct pb_manager *_mgr)
{
struct pb_alt_manager *mgr = pb_alt_manager(_mgr);
assert(mgr->provider1->flush);
if(mgr->provider1->flush)
mgr->provider1->flush(mgr->provider1);
assert(mgr->provider2->flush);
if(mgr->provider2->flush)
mgr->provider2->flush(mgr->provider2);
}
static void
pb_alt_manager_destroy(struct pb_manager *mgr)
{
FREE(mgr);
}
struct pb_manager *
pb_alt_manager_create(struct pb_manager *provider1,
struct pb_manager *provider2)
{
struct pb_alt_manager *mgr;
if(!provider1 || !provider2)
return NULL;
mgr = CALLOC_STRUCT(pb_alt_manager);
if (!mgr)
return NULL;
mgr->base.destroy = pb_alt_manager_destroy;
mgr->base.create_buffer = pb_alt_manager_create_buffer;
mgr->base.flush = pb_alt_manager_flush;
mgr->provider1 = provider1;
mgr->provider2 = provider2;
return &mgr->base;
}
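/* Illustrative use only (both providers are assumed to exist): try a fast
 * but scarce pool first, falling back to a slower one when it is exhausted.
 *
 *    struct pb_manager *mgr =
 *       pb_alt_manager_create(vram_provider, gart_provider);
 */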

View File

@ -0,0 +1,411 @@
/**************************************************************************
*
* Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* \file
* Buffer cache.
*
* \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
* \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"
#include "pb_buffer.h"
#include "pb_bufmgr.h"
/**
* Convenience macro (type safe).
*/
#define SUPER(__derived) (&(__derived)->base)
struct pb_cache_manager;
/**
* Wrapper around a pipe buffer which adds delayed destruction.
*/
struct pb_cache_buffer
{
struct pb_buffer base;
struct pb_buffer *buffer;
struct pb_cache_manager *mgr;
/** Caching time interval */
int64_t start, end;
struct list_head head;
};
struct pb_cache_manager
{
struct pb_manager base;
struct pb_manager *provider;
unsigned usecs;
pipe_mutex mutex;
struct list_head delayed;
pb_size numDelayed;
};
static INLINE struct pb_cache_buffer *
pb_cache_buffer(struct pb_buffer *buf)
{
assert(buf);
return (struct pb_cache_buffer *)buf;
}
static INLINE struct pb_cache_manager *
pb_cache_manager(struct pb_manager *mgr)
{
assert(mgr);
return (struct pb_cache_manager *)mgr;
}
/**
* Actually destroy the buffer.
*/
static INLINE void
_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
{
struct pb_cache_manager *mgr = buf->mgr;
LIST_DEL(&buf->head);
assert(mgr->numDelayed);
--mgr->numDelayed;
assert(!pipe_is_referenced(&buf->base.reference));
pb_reference(&buf->buffer, NULL);
FREE(buf);
}
/**
* Free as many cache buffers from the list head as possible.
*/
static void
_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
{
struct list_head *curr, *next;
struct pb_cache_buffer *buf;
int64_t now;
now = os_time_get();
curr = mgr->delayed.next;
next = curr->next;
while(curr != &mgr->delayed) {
buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
if(!os_time_timeout(buf->start, buf->end, now))
break;
_pb_cache_buffer_destroy(buf);
curr = next;
next = curr->next;
}
}
static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
struct pb_cache_manager *mgr = buf->mgr;
pipe_mutex_lock(mgr->mutex);
assert(!pipe_is_referenced(&buf->base.reference));
_pb_cache_buffer_list_check_free(mgr);
buf->start = os_time_get();
buf->end = buf->start + mgr->usecs;
LIST_ADDTAIL(&buf->head, &mgr->delayed);
++mgr->numDelayed;
pipe_mutex_unlock(mgr->mutex);
}
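/* Example of the resulting reuse window (illustrative numbers): with
 * usecs = 1000000, a buffer released at time t stays on the delayed list,
 * available for reuse, until t + 1 second; expired entries are freed lazily
 * by the next destroy or create_buffer call that walks the list. */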
static void *
pb_cache_buffer_map(struct pb_buffer *_buf,
unsigned flags, void *flush_ctx)
{
struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
return pb_map(buf->buffer, flags, flush_ctx);
}
static void
pb_cache_buffer_unmap(struct pb_buffer *_buf)
{
struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
pb_unmap(buf->buffer);
}
static enum pipe_error
pb_cache_buffer_validate(struct pb_buffer *_buf,
struct pb_validate *vl,
unsigned flags)
{
struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
return pb_validate(buf->buffer, vl, flags);
}
static void
pb_cache_buffer_fence(struct pb_buffer *_buf,
struct pipe_fence_handle *fence)
{
struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
pb_fence(buf->buffer, fence);
}
static void
pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
struct pb_buffer **base_buf,
pb_size *offset)
{
struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
pb_get_base_buffer(buf->buffer, base_buf, offset);
}
const struct pb_vtbl
pb_cache_buffer_vtbl = {
pb_cache_buffer_destroy,
pb_cache_buffer_map,
pb_cache_buffer_unmap,
pb_cache_buffer_validate,
pb_cache_buffer_fence,
pb_cache_buffer_get_base_buffer
};
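/**
 * Check whether a cached buffer can satisfy a new allocation request.
 *
 * Returns 1 if the buffer is compatible and idle, 0 if it is not
 * compatible, and -1 if it is compatible but still busy, in which case
 * the caller stops searching.
 */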
static INLINE int
pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
pb_size size,
const struct pb_desc *desc)
{
if(buf->base.size < size)
return 0;
/* be lenient with size, but reject buffers more than twice the requested size to limit waste */
if(buf->base.size >= 2*size)
return 0;
if(!pb_check_alignment(desc->alignment, buf->base.alignment))
return 0;
if(!pb_check_usage(desc->usage, buf->base.usage))
return 0;
if (buf->mgr->provider->is_buffer_busy) {
if (buf->mgr->provider->is_buffer_busy(buf->mgr->provider, buf->buffer))
return -1;
} else {
void *ptr = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL);
if (!ptr)
return -1;
pb_unmap(buf->buffer);
}
return 1;
}
static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr,
pb_size size,
const struct pb_desc *desc)
{
struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
struct pb_cache_buffer *buf;
struct pb_cache_buffer *curr_buf;
struct list_head *curr, *next;
int64_t now;
int ret = 0;
pipe_mutex_lock(mgr->mutex);
buf = NULL;
curr = mgr->delayed.next;
next = curr->next;
/* search in the expired buffers, freeing them in the process */
now = os_time_get();
while(curr != &mgr->delayed) {
curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
if(!buf && (ret = pb_cache_is_buffer_compat(curr_buf, size, desc)) > 0)
buf = curr_buf;
else if(os_time_timeout(curr_buf->start, curr_buf->end, now))
_pb_cache_buffer_destroy(curr_buf);
else
/* This buffer (and all that follow it) is still hot in the cache */
break;
if (ret == -1)
break;
curr = next;
next = curr->next;
}
/* keep searching in the hot buffers */
if(!buf && ret != -1) {
while(curr != &mgr->delayed) {
curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
ret = pb_cache_is_buffer_compat(curr_buf, size, desc);
if (ret > 0) {
buf = curr_buf;
break;
}
if (ret == -1)
break;
/* no need to check the timeout here */
curr = next;
next = curr->next;
}
}
if(buf) {
LIST_DEL(&buf->head);
--mgr->numDelayed;
pipe_mutex_unlock(mgr->mutex);
/* Resurrect the cached buffer with a fresh reference count */
pipe_reference_init(&buf->base.reference, 1);
return &buf->base;
}
pipe_mutex_unlock(mgr->mutex);
buf = CALLOC_STRUCT(pb_cache_buffer);
if(!buf)
return NULL;
buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
/* Empty the cache and try again. */
if (!buf->buffer) {
mgr->base.flush(&mgr->base);
buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
}
if(!buf->buffer) {
FREE(buf);
return NULL;
}
assert(pipe_is_referenced(&buf->buffer->reference));
assert(pb_check_alignment(desc->alignment, buf->buffer->alignment));
assert(pb_check_usage(desc->usage, buf->buffer->usage));
assert(buf->buffer->size >= size);
pipe_reference_init(&buf->base.reference, 1);
buf->base.alignment = buf->buffer->alignment;
buf->base.usage = buf->buffer->usage;
buf->base.size = buf->buffer->size;
buf->base.vtbl = &pb_cache_buffer_vtbl;
buf->mgr = mgr;
return &buf->base;
}
static void
pb_cache_manager_flush(struct pb_manager *_mgr)
{
struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
struct list_head *curr, *next;
struct pb_cache_buffer *buf;
pipe_mutex_lock(mgr->mutex);
curr = mgr->delayed.next;
next = curr->next;
while(curr != &mgr->delayed) {
buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
_pb_cache_buffer_destroy(buf);
curr = next;
next = curr->next;
}
pipe_mutex_unlock(mgr->mutex);
assert(mgr->provider->flush);
if(mgr->provider->flush)
mgr->provider->flush(mgr->provider);
}
static void
pb_cache_manager_destroy(struct pb_manager *mgr)
{
pb_cache_manager_flush(mgr);
FREE(mgr);
}
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
unsigned usecs)
{
struct pb_cache_manager *mgr;
if(!provider)
return NULL;
mgr = CALLOC_STRUCT(pb_cache_manager);
if (!mgr)
return NULL;
mgr->base.destroy = pb_cache_manager_destroy;
mgr->base.create_buffer = pb_cache_manager_create_buffer;
mgr->base.flush = pb_cache_manager_flush;
mgr->provider = provider;
mgr->usecs = usecs;
LIST_INITHEAD(&mgr->delayed);
mgr->numDelayed = 0;
pipe_mutex_init(mgr->mutex);
return &mgr->base;
}

View File

@ -0,0 +1,497 @@
/**************************************************************************
*
* Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* \file
* Debug buffer manager to detect buffer under- and overflows.
*
* \author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"
#include "util/u_debug_stack.h"
#include "pb_buffer.h"
#include "pb_bufmgr.h"
#ifdef DEBUG
#define PB_DEBUG_CREATE_BACKTRACE 8
#define PB_DEBUG_MAP_BACKTRACE 8
/**
* Convenience macro (type safe).
*/
#define SUPER(__derived) (&(__derived)->base)
struct pb_debug_manager;
/**
* Wrapper around a pipe buffer which adds delayed destruction.
*/
struct pb_debug_buffer
{
struct pb_buffer base;
struct pb_buffer *buffer;
struct pb_debug_manager *mgr;
pb_size underflow_size;
pb_size overflow_size;
struct debug_stack_frame create_backtrace[PB_DEBUG_CREATE_BACKTRACE];
pipe_mutex mutex;
unsigned map_count;
struct debug_stack_frame map_backtrace[PB_DEBUG_MAP_BACKTRACE];
struct list_head head;
};
struct pb_debug_manager
{
struct pb_manager base;
struct pb_manager *provider;
pb_size underflow_size;
pb_size overflow_size;
pipe_mutex mutex;
struct list_head list;
};
static INLINE struct pb_debug_buffer *
pb_debug_buffer(struct pb_buffer *buf)
{
assert(buf);
return (struct pb_debug_buffer *)buf;
}
static INLINE struct pb_debug_manager *
pb_debug_manager(struct pb_manager *mgr)
{
assert(mgr);
return (struct pb_debug_manager *)mgr;
}
static const uint8_t random_pattern[32] = {
0xaf, 0xcf, 0xa5, 0xa2, 0xc2, 0x63, 0x15, 0x1a,
0x7e, 0xe2, 0x7e, 0x84, 0x15, 0x49, 0xa2, 0x1e,
0x49, 0x63, 0xf5, 0x52, 0x74, 0x66, 0x9e, 0xc4,
0x6d, 0xcf, 0x2c, 0x4a, 0x74, 0xe6, 0xfd, 0x94
};
static INLINE void
fill_random_pattern(uint8_t *dst, pb_size size)
{
pb_size i = 0;
while(size--) {
*dst++ = random_pattern[i++];
i &= sizeof(random_pattern) - 1;
}
}
static INLINE boolean
check_random_pattern(const uint8_t *dst, pb_size size,
pb_size *min_ofs, pb_size *max_ofs)
{
boolean result = TRUE;
pb_size i;
*min_ofs = size;
*max_ofs = 0;
for(i = 0; i < size; ++i) {
if(*dst++ != random_pattern[i % sizeof(random_pattern)]) {
*min_ofs = MIN2(*min_ofs, i);
*max_ofs = MAX2(*max_ofs, i);
result = FALSE;
}
}
return result;
}
static void
pb_debug_buffer_fill(struct pb_debug_buffer *buf)
{
uint8_t *map;
map = pb_map(buf->buffer, PB_USAGE_CPU_WRITE, NULL);
assert(map);
if(map) {
fill_random_pattern(map, buf->underflow_size);
fill_random_pattern(map + buf->underflow_size + buf->base.size,
buf->overflow_size);
pb_unmap(buf->buffer);
}
}
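/* Layout of the underlying buffer (illustrative diagram):
 *
 *    |<- underflow_size ->|<--- base.size --->|<- overflow_size ->|
 *    |   random pattern   |     user data     |  random pattern   |
 *
 * pb_debug_buffer_map() hands out a pointer to the user data area only;
 * the surrounding patterns are re-checked on map, unmap, validate and
 * destroy to detect out-of-bounds writes.
 */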
/**
* Check for under/over flows.
*
 * Should be called with the buffer unmapped.
*/
static void
pb_debug_buffer_check(struct pb_debug_buffer *buf)
{
uint8_t *map;
map = pb_map(buf->buffer,
PB_USAGE_CPU_READ |
PB_USAGE_UNSYNCHRONIZED, NULL);
assert(map);
if(map) {
boolean underflow, overflow;
pb_size min_ofs, max_ofs;
underflow = !check_random_pattern(map, buf->underflow_size,
&min_ofs, &max_ofs);
if(underflow) {
debug_printf("buffer underflow (offset -%u%s to -%u bytes) detected\n",
buf->underflow_size - min_ofs,
min_ofs == 0 ? "+" : "",
buf->underflow_size - max_ofs);
}
overflow = !check_random_pattern(map + buf->underflow_size + buf->base.size,
buf->overflow_size,
&min_ofs, &max_ofs);
if(overflow) {
debug_printf("buffer overflow (size %u plus offset %u to %u%s bytes) detected\n",
buf->base.size,
min_ofs,
max_ofs,
max_ofs == buf->overflow_size - 1 ? "+" : "");
}
if(underflow || overflow)
debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);
debug_assert(!underflow);
debug_assert(!overflow);
/* re-fill if not aborted */
if(underflow)
fill_random_pattern(map, buf->underflow_size);
if(overflow)
fill_random_pattern(map + buf->underflow_size + buf->base.size,
buf->overflow_size);
pb_unmap(buf->buffer);
}
}
static void
pb_debug_buffer_destroy(struct pb_buffer *_buf)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
struct pb_debug_manager *mgr = buf->mgr;
assert(!pipe_is_referenced(&buf->base.reference));
pb_debug_buffer_check(buf);
pipe_mutex_lock(mgr->mutex);
LIST_DEL(&buf->head);
pipe_mutex_unlock(mgr->mutex);
pipe_mutex_destroy(buf->mutex);
pb_reference(&buf->buffer, NULL);
FREE(buf);
}
static void *
pb_debug_buffer_map(struct pb_buffer *_buf,
unsigned flags, void *flush_ctx)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
void *map;
pb_debug_buffer_check(buf);
map = pb_map(buf->buffer, flags, flush_ctx);
if(!map)
return NULL;
pipe_mutex_lock(buf->mutex);
++buf->map_count;
debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
pipe_mutex_unlock(buf->mutex);
return (uint8_t *)map + buf->underflow_size;
}
static void
pb_debug_buffer_unmap(struct pb_buffer *_buf)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
pipe_mutex_lock(buf->mutex);
assert(buf->map_count);
if(buf->map_count)
--buf->map_count;
pipe_mutex_unlock(buf->mutex);
pb_unmap(buf->buffer);
pb_debug_buffer_check(buf);
}
static void
pb_debug_buffer_get_base_buffer(struct pb_buffer *_buf,
struct pb_buffer **base_buf,
pb_size *offset)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
pb_get_base_buffer(buf->buffer, base_buf, offset);
*offset += buf->underflow_size;
}
static enum pipe_error
pb_debug_buffer_validate(struct pb_buffer *_buf,
struct pb_validate *vl,
unsigned flags)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
pipe_mutex_lock(buf->mutex);
if(buf->map_count) {
debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
debug_printf("last map backtrace is\n");
debug_backtrace_dump(buf->map_backtrace, PB_DEBUG_MAP_BACKTRACE);
}
pipe_mutex_unlock(buf->mutex);
pb_debug_buffer_check(buf);
return pb_validate(buf->buffer, vl, flags);
}
static void
pb_debug_buffer_fence(struct pb_buffer *_buf,
struct pipe_fence_handle *fence)
{
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
pb_fence(buf->buffer, fence);
}
const struct pb_vtbl
pb_debug_buffer_vtbl = {
pb_debug_buffer_destroy,
pb_debug_buffer_map,
pb_debug_buffer_unmap,
pb_debug_buffer_validate,
pb_debug_buffer_fence,
pb_debug_buffer_get_base_buffer
};
static void
pb_debug_manager_dump_locked(struct pb_debug_manager *mgr)
{
struct list_head *curr, *next;
struct pb_debug_buffer *buf;
curr = mgr->list.next;
next = curr->next;
while(curr != &mgr->list) {
buf = LIST_ENTRY(struct pb_debug_buffer, curr, head);
debug_printf("buffer = %p\n", (void *) buf);
debug_printf(" .size = 0x%x\n", buf->base.size);
debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);
curr = next;
next = curr->next;
}
}
static struct pb_buffer *
pb_debug_manager_create_buffer(struct pb_manager *_mgr,
pb_size size,
const struct pb_desc *desc)
{
struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
struct pb_debug_buffer *buf;
struct pb_desc real_desc;
pb_size real_size;
assert(size);
assert(desc->alignment);
buf = CALLOC_STRUCT(pb_debug_buffer);
if(!buf)
return NULL;
real_size = mgr->underflow_size + size + mgr->overflow_size;
real_desc = *desc;
real_desc.usage |= PB_USAGE_CPU_WRITE;
real_desc.usage |= PB_USAGE_CPU_READ;
buf->buffer = mgr->provider->create_buffer(mgr->provider,
real_size,
&real_desc);
if(!buf->buffer) {
FREE(buf);
#if 0
pipe_mutex_lock(mgr->mutex);
debug_printf("%s: failed to create buffer\n", __FUNCTION__);
if(!LIST_IS_EMPTY(&mgr->list))
pb_debug_manager_dump_locked(mgr);
pipe_mutex_unlock(mgr->mutex);
#endif
return NULL;
}
assert(pipe_is_referenced(&buf->buffer->reference));
assert(pb_check_alignment(real_desc.alignment, buf->buffer->alignment));
assert(pb_check_usage(real_desc.usage, buf->buffer->usage));
assert(buf->buffer->size >= real_size);
pipe_reference_init(&buf->base.reference, 1);
buf->base.alignment = desc->alignment;
buf->base.usage = desc->usage;
buf->base.size = size;
buf->base.vtbl = &pb_debug_buffer_vtbl;
buf->mgr = mgr;
buf->underflow_size = mgr->underflow_size;
buf->overflow_size = buf->buffer->size - buf->underflow_size - size;
debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE);
pb_debug_buffer_fill(buf);
pipe_mutex_init(buf->mutex);
pipe_mutex_lock(mgr->mutex);
LIST_ADDTAIL(&buf->head, &mgr->list);
pipe_mutex_unlock(mgr->mutex);
return &buf->base;
}
static void
pb_debug_manager_flush(struct pb_manager *_mgr)
{
struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
assert(mgr->provider->flush);
if(mgr->provider->flush)
mgr->provider->flush(mgr->provider);
}
static void
pb_debug_manager_destroy(struct pb_manager *_mgr)
{
struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
pipe_mutex_lock(mgr->mutex);
if(!LIST_IS_EMPTY(&mgr->list)) {
debug_printf("%s: unfreed buffers\n", __FUNCTION__);
pb_debug_manager_dump_locked(mgr);
}
pipe_mutex_unlock(mgr->mutex);
pipe_mutex_destroy(mgr->mutex);
mgr->provider->destroy(mgr->provider);
FREE(mgr);
}
struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
pb_size underflow_size, pb_size overflow_size)
{
struct pb_debug_manager *mgr;
if(!provider)
return NULL;
mgr = CALLOC_STRUCT(pb_debug_manager);
if (!mgr)
return NULL;
mgr->base.destroy = pb_debug_manager_destroy;
mgr->base.create_buffer = pb_debug_manager_create_buffer;
mgr->base.flush = pb_debug_manager_flush;
mgr->provider = provider;
mgr->underflow_size = underflow_size;
mgr->overflow_size = overflow_size;
pipe_mutex_init(mgr->mutex);
LIST_INITHEAD(&mgr->list);
return &mgr->base;
}
#else /* !DEBUG */
struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
pb_size underflow_size, pb_size overflow_size)
{
return provider;
}
#endif /* !DEBUG */

View File

@ -0,0 +1,320 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* \file
* Buffer manager using the old texture memory manager.
*
* \author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_mm.h"
#include "pb_buffer.h"
#include "pb_bufmgr.h"
/**
* Convenience macro (type safe).
*/
#define SUPER(__derived) (&(__derived)->base)
struct mm_pb_manager
{
struct pb_manager base;
pipe_mutex mutex;
pb_size size;
struct mem_block *heap;
pb_size align2;
struct pb_buffer *buffer;
void *map;
};
static INLINE struct mm_pb_manager *
mm_pb_manager(struct pb_manager *mgr)
{
assert(mgr);
return (struct mm_pb_manager *)mgr;
}
struct mm_buffer
{
struct pb_buffer base;
struct mm_pb_manager *mgr;
struct mem_block *block;
};
static INLINE struct mm_buffer *
mm_buffer(struct pb_buffer *buf)
{
assert(buf);
return (struct mm_buffer *)buf;
}
static void
mm_buffer_destroy(struct pb_buffer *buf)
{
struct mm_buffer *mm_buf = mm_buffer(buf);
struct mm_pb_manager *mm = mm_buf->mgr;
assert(!pipe_is_referenced(&mm_buf->base.reference));
pipe_mutex_lock(mm->mutex);
u_mmFreeMem(mm_buf->block);
FREE(mm_buf);
pipe_mutex_unlock(mm->mutex);
}
static void *
mm_buffer_map(struct pb_buffer *buf,
unsigned flags,
void *flush_ctx)
{
struct mm_buffer *mm_buf = mm_buffer(buf);
struct mm_pb_manager *mm = mm_buf->mgr;
/* XXX: it will be necessary to remap here to propagate flush_ctx */
return (unsigned char *) mm->map + mm_buf->block->ofs;
}
static void
mm_buffer_unmap(struct pb_buffer *buf)
{
/* No-op */
}
static enum pipe_error
mm_buffer_validate(struct pb_buffer *buf,
struct pb_validate *vl,
unsigned flags)
{
struct mm_buffer *mm_buf = mm_buffer(buf);
struct mm_pb_manager *mm = mm_buf->mgr;
return pb_validate(mm->buffer, vl, flags);
}
static void
mm_buffer_fence(struct pb_buffer *buf,
struct pipe_fence_handle *fence)
{
struct mm_buffer *mm_buf = mm_buffer(buf);
struct mm_pb_manager *mm = mm_buf->mgr;
pb_fence(mm->buffer, fence);
}
static void
mm_buffer_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
pb_size *offset)
{
struct mm_buffer *mm_buf = mm_buffer(buf);
struct mm_pb_manager *mm = mm_buf->mgr;
pb_get_base_buffer(mm->buffer, base_buf, offset);
*offset += mm_buf->block->ofs;
}
static const struct pb_vtbl
mm_buffer_vtbl = {
mm_buffer_destroy,
mm_buffer_map,
mm_buffer_unmap,
mm_buffer_validate,
mm_buffer_fence,
mm_buffer_get_base_buffer
};
static struct pb_buffer *
mm_bufmgr_create_buffer(struct pb_manager *mgr,
pb_size size,
const struct pb_desc *desc)
{
struct mm_pb_manager *mm = mm_pb_manager(mgr);
struct mm_buffer *mm_buf;
/* We don't handle alignments larger than the one initially set up */
assert(pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2));
if(!pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2))
return NULL;
pipe_mutex_lock(mm->mutex);
mm_buf = CALLOC_STRUCT(mm_buffer);
if (!mm_buf) {
pipe_mutex_unlock(mm->mutex);
return NULL;
}
pipe_reference_init(&mm_buf->base.reference, 1);
mm_buf->base.alignment = desc->alignment;
mm_buf->base.usage = desc->usage;
mm_buf->base.size = size;
mm_buf->base.vtbl = &mm_buffer_vtbl;
mm_buf->mgr = mm;
mm_buf->block = u_mmAllocMem(mm->heap, (int)size, (int)mm->align2, 0);
if(!mm_buf->block) {
#if 0
debug_printf("warning: heap full\n");
mmDumpMemInfo(mm->heap);
#endif
FREE(mm_buf);
pipe_mutex_unlock(mm->mutex);
return NULL;
}
/* Some sanity checks */
assert(0 <= (pb_size)mm_buf->block->ofs && (pb_size)mm_buf->block->ofs < mm->size);
assert(size <= (pb_size)mm_buf->block->size && (pb_size)mm_buf->block->ofs + (pb_size)mm_buf->block->size <= mm->size);
pipe_mutex_unlock(mm->mutex);
return SUPER(mm_buf);
}
static void
mm_bufmgr_flush(struct pb_manager *mgr)
{
/* No-op */
}
static void
mm_bufmgr_destroy(struct pb_manager *mgr)
{
struct mm_pb_manager *mm = mm_pb_manager(mgr);
pipe_mutex_lock(mm->mutex);
u_mmDestroy(mm->heap);
pb_unmap(mm->buffer);
pb_reference(&mm->buffer, NULL);
pipe_mutex_unlock(mm->mutex);
FREE(mgr);
}
struct pb_manager *
mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
pb_size size, pb_size align2)
{
struct mm_pb_manager *mm;
if(!buffer)
return NULL;
mm = CALLOC_STRUCT(mm_pb_manager);
if (!mm)
return NULL;
mm->base.destroy = mm_bufmgr_destroy;
mm->base.create_buffer = mm_bufmgr_create_buffer;
mm->base.flush = mm_bufmgr_flush;
mm->size = size;
mm->align2 = align2; /* log2 of the sub-allocation alignment */
pipe_mutex_init(mm->mutex);
mm->buffer = buffer;
mm->map = pb_map(mm->buffer,
PB_USAGE_CPU_READ |
PB_USAGE_CPU_WRITE, NULL);
if(!mm->map)
goto failure;
mm->heap = u_mmInit(0, (int)size);
if (!mm->heap)
goto failure;
return SUPER(mm);
failure:
if(mm->heap)
u_mmDestroy(mm->heap);
if(mm->map)
pb_unmap(mm->buffer);
FREE(mm);
return NULL;
}
struct pb_manager *
mm_bufmgr_create(struct pb_manager *provider,
pb_size size, pb_size align2)
{
struct pb_buffer *buffer;
struct pb_manager *mgr;
struct pb_desc desc;
if(!provider)
return NULL;
memset(&desc, 0, sizeof(desc));
desc.alignment = 1 << align2;
buffer = provider->create_buffer(provider, size, &desc);
if (!buffer)
return NULL;
mgr = mm_bufmgr_create_from_buffer(buffer, size, align2);
if (!mgr) {
pb_reference(&buffer, NULL);
return NULL;
}
return mgr;
}
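/* Illustrative use (sizes assumed): carve a 1 MiB heap, whose
 * sub-allocations are 64-byte aligned (align2 = 6, i.e. 1 << 6 bytes),
 * out of a single pinned buffer obtained from the provider.
 *
 *    struct pb_manager *mm = mm_bufmgr_create(provider, 1024 * 1024, 6);
 */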

View File

@ -0,0 +1,305 @@
/**************************************************************************
*
* Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* @file
* A variation of malloc buffers which get transferred to real graphics memory
* when there is an attempt to validate them.
*
* @author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "pb_buffer.h"
#include "pb_bufmgr.h"
struct pb_ondemand_manager;
struct pb_ondemand_buffer
{
struct pb_buffer base;
struct pb_ondemand_manager *mgr;
/** Regular malloc'ed memory */
void *data;
unsigned mapcount;
/** Real buffer */
struct pb_buffer *buffer;
pb_size size;
struct pb_desc desc;
};
struct pb_ondemand_manager
{
struct pb_manager base;
struct pb_manager *provider;
};
extern const struct pb_vtbl pb_ondemand_buffer_vtbl;
static INLINE struct pb_ondemand_buffer *
pb_ondemand_buffer(struct pb_buffer *buf)
{
assert(buf);
if (!buf)
return NULL;
assert(buf->vtbl == &pb_ondemand_buffer_vtbl);
return (struct pb_ondemand_buffer *)buf;
}
static INLINE struct pb_ondemand_manager *
pb_ondemand_manager(struct pb_manager *mgr)
{
assert(mgr);
return (struct pb_ondemand_manager *)mgr;
}
static void
pb_ondemand_buffer_destroy(struct pb_buffer *_buf)
{
struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
pb_reference(&buf->buffer, NULL);
align_free(buf->data);
FREE(buf);
}
static void *
pb_ondemand_buffer_map(struct pb_buffer *_buf,
unsigned flags, void *flush_ctx)
{
struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
if(buf->buffer) {
assert(!buf->data);
return pb_map(buf->buffer, flags, flush_ctx);
}
else {
assert(buf->data);
++buf->mapcount;
return buf->data;
}
}
static void
pb_ondemand_buffer_unmap(struct pb_buffer *_buf)
{
struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
if(buf->buffer) {
assert(!buf->data);
pb_unmap(buf->buffer);
}
else {
assert(buf->data);
assert(buf->mapcount);
if(buf->mapcount)
--buf->mapcount;
}
}
static enum pipe_error
pb_ondemand_buffer_instantiate(struct pb_ondemand_buffer *buf)
{
if(!buf->buffer) {
struct pb_manager *provider = buf->mgr->provider;
uint8_t *map;
assert(!buf->mapcount);
buf->buffer = provider->create_buffer(provider, buf->size, &buf->desc);
if(!buf->buffer)
return PIPE_ERROR_OUT_OF_MEMORY;
map = pb_map(buf->buffer, PB_USAGE_CPU_WRITE, NULL);
if(!map) {
pb_reference(&buf->buffer, NULL);
return PIPE_ERROR;
}
memcpy(map, buf->data, buf->size);
pb_unmap(buf->buffer);
if(!buf->mapcount) {
align_free(buf->data);
buf->data = NULL;
}
}
return PIPE_OK;
}
static enum pipe_error
pb_ondemand_buffer_validate(struct pb_buffer *_buf,
struct pb_validate *vl,
unsigned flags)
{
struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
enum pipe_error ret;
assert(!buf->mapcount);
if(buf->mapcount)
return PIPE_ERROR;
ret = pb_ondemand_buffer_instantiate(buf);
if(ret != PIPE_OK)
return ret;
return pb_validate(buf->buffer, vl, flags);
}
static void
pb_ondemand_buffer_fence(struct pb_buffer *_buf,
struct pipe_fence_handle *fence)
{
struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
assert(buf->buffer);
if(!buf->buffer)
return;
pb_fence(buf->buffer, fence);
}
static void
pb_ondemand_buffer_get_base_buffer(struct pb_buffer *_buf,
struct pb_buffer **base_buf,
pb_size *offset)
{
struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
if(pb_ondemand_buffer_instantiate(buf) != PIPE_OK) {
assert(0);
*base_buf = &buf->base;
*offset = 0;
return;
}
pb_get_base_buffer(buf->buffer, base_buf, offset);
}
const struct pb_vtbl
pb_ondemand_buffer_vtbl = {
pb_ondemand_buffer_destroy,
pb_ondemand_buffer_map,
pb_ondemand_buffer_unmap,
pb_ondemand_buffer_validate,
pb_ondemand_buffer_fence,
pb_ondemand_buffer_get_base_buffer
};
static struct pb_buffer *
pb_ondemand_manager_create_buffer(struct pb_manager *_mgr,
pb_size size,
const struct pb_desc *desc)
{
struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr);
struct pb_ondemand_buffer *buf;
buf = CALLOC_STRUCT(pb_ondemand_buffer);
if(!buf)
return NULL;
pipe_reference_init(&buf->base.reference, 1);
buf->base.alignment = desc->alignment;
buf->base.usage = desc->usage;
buf->base.size = size;
buf->base.vtbl = &pb_ondemand_buffer_vtbl;
buf->mgr = mgr;
buf->data = align_malloc(size, desc->alignment < sizeof(void*) ? sizeof(void*) : desc->alignment);
if(!buf->data) {
FREE(buf);
return NULL;
}
buf->size = size;
buf->desc = *desc;
return &buf->base;
}
static void
pb_ondemand_manager_flush(struct pb_manager *_mgr)
{
struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr);
mgr->provider->flush(mgr->provider);
}
static void
pb_ondemand_manager_destroy(struct pb_manager *_mgr)
{
struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr);
FREE(mgr);
}
struct pb_manager *
pb_ondemand_manager_create(struct pb_manager *provider)
{
struct pb_ondemand_manager *mgr;
if(!provider)
return NULL;
mgr = CALLOC_STRUCT(pb_ondemand_manager);
if(!mgr)
return NULL;
mgr->base.destroy = pb_ondemand_manager_destroy;
mgr->base.create_buffer = pb_ondemand_manager_create_buffer;
mgr->base.flush = pb_ondemand_manager_flush;
mgr->provider = provider;
return &mgr->base;
}

View File

@ -0,0 +1,321 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/**
* \file
* Batch buffer pool management.
*
* \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
* \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "pb_buffer.h"
#include "pb_bufmgr.h"
/**
* Convenience macro (type safe).
*/
#define SUPER(__derived) (&(__derived)->base)
struct pool_pb_manager
{
struct pb_manager base;
pipe_mutex mutex;
pb_size bufSize;
pb_size bufAlign;
pb_size numFree;
pb_size numTot;
struct list_head free;
struct pb_buffer *buffer;
void *map;
struct pool_buffer *bufs;
};
static INLINE struct pool_pb_manager *
pool_pb_manager(struct pb_manager *mgr)
{
assert(mgr);
return (struct pool_pb_manager *)mgr;
}
struct pool_buffer
{
struct pb_buffer base;
struct pool_pb_manager *mgr;
struct list_head head;
pb_size start;
};
static INLINE struct pool_buffer *
pool_buffer(struct pb_buffer *buf)
{
assert(buf);
return (struct pool_buffer *)buf;
}
static void
pool_buffer_destroy(struct pb_buffer *buf)
{
struct pool_buffer *pool_buf = pool_buffer(buf);
struct pool_pb_manager *pool = pool_buf->mgr;
assert(!pipe_is_referenced(&pool_buf->base.reference));
pipe_mutex_lock(pool->mutex);
LIST_ADD(&pool_buf->head, &pool->free);
pool->numFree++;
pipe_mutex_unlock(pool->mutex);
}
static void *
pool_buffer_map(struct pb_buffer *buf, unsigned flags, void *flush_ctx)
{
struct pool_buffer *pool_buf = pool_buffer(buf);
struct pool_pb_manager *pool = pool_buf->mgr;
void *map;
/* XXX: it will be necessary to remap here to propagate flush_ctx */
pipe_mutex_lock(pool->mutex);
map = (unsigned char *) pool->map + pool_buf->start;
pipe_mutex_unlock(pool->mutex);
return map;
}
static void
pool_buffer_unmap(struct pb_buffer *buf)
{
/* No-op */
}
static enum pipe_error
pool_buffer_validate(struct pb_buffer *buf,
struct pb_validate *vl,
unsigned flags)
{
struct pool_buffer *pool_buf = pool_buffer(buf);
struct pool_pb_manager *pool = pool_buf->mgr;
return pb_validate(pool->buffer, vl, flags);
}
static void
pool_buffer_fence(struct pb_buffer *buf,
struct pipe_fence_handle *fence)
{
struct pool_buffer *pool_buf = pool_buffer(buf);
struct pool_pb_manager *pool = pool_buf->mgr;
pb_fence(pool->buffer, fence);
}
static void
pool_buffer_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
pb_size *offset)
{
struct pool_buffer *pool_buf = pool_buffer(buf);
struct pool_pb_manager *pool = pool_buf->mgr;
pb_get_base_buffer(pool->buffer, base_buf, offset);
*offset += pool_buf->start;
}
static const struct pb_vtbl
pool_buffer_vtbl = {
pool_buffer_destroy,
pool_buffer_map,
pool_buffer_unmap,
pool_buffer_validate,
pool_buffer_fence,
pool_buffer_get_base_buffer
};
static struct pb_buffer *
pool_bufmgr_create_buffer(struct pb_manager *mgr,
pb_size size,
const struct pb_desc *desc)
{
struct pool_pb_manager *pool = pool_pb_manager(mgr);
struct pool_buffer *pool_buf;
struct list_head *item;
assert(size == pool->bufSize);
assert(pool->bufAlign % desc->alignment == 0);
pipe_mutex_lock(pool->mutex);
if (pool->numFree == 0) {
pipe_mutex_unlock(pool->mutex);
debug_printf("warning: out of fixed size buffer objects\n");
return NULL;
}
item = pool->free.next;
if (item == &pool->free) {
pipe_mutex_unlock(pool->mutex);
debug_printf("error: fixed size buffer pool corruption\n");
return NULL;
}
LIST_DEL(item);
--pool->numFree;
pipe_mutex_unlock(pool->mutex);
pool_buf = LIST_ENTRY(struct pool_buffer, item, head);
assert(!pipe_is_referenced(&pool_buf->base.reference));
pipe_reference_init(&pool_buf->base.reference, 1);
pool_buf->base.alignment = desc->alignment;
pool_buf->base.usage = desc->usage;
return SUPER(pool_buf);
}
static void
pool_bufmgr_flush(struct pb_manager *mgr)
{
/* No-op */
}
static void
pool_bufmgr_destroy(struct pb_manager *mgr)
{
struct pool_pb_manager *pool = pool_pb_manager(mgr);
pipe_mutex_lock(pool->mutex);
FREE(pool->bufs);
pb_unmap(pool->buffer);
pb_reference(&pool->buffer, NULL);
pipe_mutex_unlock(pool->mutex);
FREE(mgr);
}
struct pb_manager *
pool_bufmgr_create(struct pb_manager *provider,
pb_size numBufs,
pb_size bufSize,
const struct pb_desc *desc)
{
struct pool_pb_manager *pool;
struct pool_buffer *pool_buf;
pb_size i;
if(!provider)
return NULL;
pool = CALLOC_STRUCT(pool_pb_manager);
if (!pool)
return NULL;
pool->base.destroy = pool_bufmgr_destroy;
pool->base.create_buffer = pool_bufmgr_create_buffer;
pool->base.flush = pool_bufmgr_flush;
LIST_INITHEAD(&pool->free);
pool->numTot = numBufs;
pool->numFree = numBufs;
pool->bufSize = bufSize;
pool->bufAlign = desc->alignment;
pipe_mutex_init(pool->mutex);
pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc);
if (!pool->buffer)
goto failure;
pool->map = pb_map(pool->buffer,
PB_USAGE_CPU_READ |
PB_USAGE_CPU_WRITE, NULL);
if(!pool->map)
goto failure;
pool->bufs = (struct pool_buffer *)CALLOC(numBufs, sizeof(*pool->bufs));
if (!pool->bufs)
goto failure;
pool_buf = pool->bufs;
for (i = 0; i < numBufs; ++i) {
pipe_reference_init(&pool_buf->base.reference, 0);
pool_buf->base.alignment = 0;
pool_buf->base.usage = 0;
pool_buf->base.size = bufSize;
pool_buf->base.vtbl = &pool_buffer_vtbl;
pool_buf->mgr = pool;
pool_buf->start = i * bufSize;
LIST_ADDTAIL(&pool_buf->head, &pool->free);
pool_buf++;
}
return SUPER(pool);
failure:
FREE(pool->bufs);
if(pool->map)
pb_unmap(pool->buffer);
if(pool->buffer)
pb_reference(&pool->buffer, NULL);
FREE(pool);
return NULL;
}
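/* Illustrative use (values assumed): preallocate sixteen 64 KiB batch
 * buffers up front. create_buffer then merely pops an entry off the free
 * list and destroy pushes it back, so steady-state allocation never goes
 * back to the provider.
 *
 *    struct pb_desc desc;
 *    memset(&desc, 0, sizeof(desc));
 *    desc.alignment = 4096;
 *    desc.usage = PB_USAGE_GPU_READ_WRITE;
 *    mgr = pool_bufmgr_create(provider, 16, 64 * 1024, &desc);
 */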

View File

@ -0,0 +1,590 @@
/**************************************************************************
*
* Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
 * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/**
* @file
* S-lab pool implementation.
*
* @sa http://en.wikipedia.org/wiki/Slab_allocation
*
* @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* @author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"
#include "pb_buffer.h"
#include "pb_bufmgr.h"
struct pb_slab;
/**
* Buffer in a slab.
*
* Sub-allocation of a contiguous buffer.
*/
struct pb_slab_buffer
{
struct pb_buffer base;
struct pb_slab *slab;
struct list_head head;
unsigned mapCount;
/** Offset relative to the start of the slab buffer. */
pb_size start;
/** Use when validating, to signal that all mappings are finished */
/* TODO: Actually validation does not reach this stage yet */
pipe_condvar event;
};
/**
* Slab -- a contiguous piece of memory.
*/
struct pb_slab
{
struct list_head head;
struct list_head freeBuffers;
pb_size numBuffers;
pb_size numFree;
struct pb_slab_buffer *buffers;
struct pb_slab_manager *mgr;
/** Buffer from the provider */
struct pb_buffer *bo;
void *virtual;
};
/**
 * Slab manager. It adds and removes slabs as needed to satisfy the
 * allocation and destruction of individual buffers.
*/
struct pb_slab_manager
{
struct pb_manager base;
/** From where we get our buffers */
struct pb_manager *provider;
/** Size of the buffers we hand on downstream */
pb_size bufSize;
/** Size of the buffers we request upstream */
pb_size slabSize;
/**
* Alignment, usage to be used to allocate the slab buffers.
*
* We can only provide buffers which are consistent (in alignment, usage)
* with this description.
*/
struct pb_desc desc;
/**
* Partial slabs
*
* Full slabs are not stored in any list. Empty slabs are destroyed
 * immediately.
*/
struct list_head slabs;
pipe_mutex mutex;
};
/**
* Wrapper around several slabs, therefore capable of handling buffers of
* multiple sizes.
*
 * This buffer manager just dispatches buffer allocations to the appropriate
 * slab manager according to the requested buffer size, or bypasses the slab
 * managers altogether for larger sizes.
*
* The data of this structure remains constant after
* initialization and thus needs no mutex protection.
*/
struct pb_slab_range_manager
{
struct pb_manager base;
struct pb_manager *provider;
pb_size minBufSize;
pb_size maxBufSize;
/** @sa pb_slab_manager::desc */
struct pb_desc desc;
unsigned numBuckets;
pb_size *bucketSizes;
/** Array of pb_slab_manager, one for each bucket size */
struct pb_manager **buckets;
};
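/* Worked example of the bucket layout (sizes assumed): with
 * minBufSize = 4096 and maxBufSize = 65536 the range manager builds five
 * slab managers handing out 4, 8, 16, 32 and 64 KiB buffers. A request is
 * rounded up to the smallest bucket that fits it (alignment included), and
 * anything larger than 64 KiB goes straight to the provider. */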
static INLINE struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
assert(buf);
return (struct pb_slab_buffer *)buf;
}
static INLINE struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
assert(mgr);
return (struct pb_slab_manager *)mgr;
}
static INLINE struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
assert(mgr);
return (struct pb_slab_range_manager *)mgr;
}
/**
 * Return a buffer to its slab's free list, destroying the slab
 * once all of its buffers are free.
 */
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
struct pb_slab *slab = buf->slab;
struct pb_slab_manager *mgr = slab->mgr;
struct list_head *list = &buf->head;
pipe_mutex_lock(mgr->mutex);
assert(!pipe_is_referenced(&buf->base.reference));
buf->mapCount = 0;
LIST_DEL(list);
LIST_ADDTAIL(list, &slab->freeBuffers);
slab->numFree++;
if (slab->head.next == &slab->head)
LIST_ADDTAIL(&slab->head, &mgr->slabs);
/* If the slab becomes totally empty, free it */
if (slab->numFree == slab->numBuffers) {
list = &slab->head;
LIST_DELINIT(list);
pb_reference(&slab->bo, NULL);
FREE(slab->buffers);
FREE(slab);
}
pipe_mutex_unlock(mgr->mutex);
}
static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
unsigned flags,
void *flush_ctx)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
/* XXX: it will be necessary to remap here to propagate flush_ctx */
++buf->mapCount;
return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}
static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
--buf->mapCount;
if (buf->mapCount == 0)
pipe_condvar_broadcast(buf->event);
}
static enum pipe_error
pb_slab_buffer_validate(struct pb_buffer *_buf,
struct pb_validate *vl,
unsigned flags)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
return pb_validate(buf->slab->bo, vl, flags);
}
static void
pb_slab_buffer_fence(struct pb_buffer *_buf,
struct pipe_fence_handle *fence)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
pb_fence(buf->slab->bo, fence);
}
static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
struct pb_buffer **base_buf,
pb_size *offset)
{
struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
pb_get_base_buffer(buf->slab->bo, base_buf, offset);
*offset += buf->start;
}
static const struct pb_vtbl
pb_slab_buffer_vtbl = {
pb_slab_buffer_destroy,
pb_slab_buffer_map,
pb_slab_buffer_unmap,
pb_slab_buffer_validate,
pb_slab_buffer_fence,
pb_slab_buffer_get_base_buffer
};
/**
* Create a new slab.
*
* Called when we ran out of free slabs.
*/
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
struct pb_slab *slab;
struct pb_slab_buffer *buf;
unsigned numBuffers;
unsigned i;
enum pipe_error ret;
slab = CALLOC_STRUCT(pb_slab);
if (!slab)
return PIPE_ERROR_OUT_OF_MEMORY;
slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
if(!slab->bo) {
ret = PIPE_ERROR_OUT_OF_MEMORY;
goto out_err0;
}
/* Note down the slab virtual address. All mappings are accessed directly
* through this address so it is required that the buffer is pinned. */
slab->virtual = pb_map(slab->bo,
PB_USAGE_CPU_READ |
PB_USAGE_CPU_WRITE, NULL);
if(!slab->virtual) {
ret = PIPE_ERROR_OUT_OF_MEMORY;
goto out_err1;
}
pb_unmap(slab->bo);
numBuffers = slab->bo->size / mgr->bufSize;
slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
if (!slab->buffers) {
ret = PIPE_ERROR_OUT_OF_MEMORY;
goto out_err1;
}
LIST_INITHEAD(&slab->head);
LIST_INITHEAD(&slab->freeBuffers);
slab->numBuffers = numBuffers;
slab->numFree = 0;
slab->mgr = mgr;
buf = slab->buffers;
for (i=0; i < numBuffers; ++i) {
pipe_reference_init(&buf->base.reference, 0);
buf->base.size = mgr->bufSize;
buf->base.alignment = 0;
buf->base.usage = 0;
buf->base.vtbl = &pb_slab_buffer_vtbl;
buf->slab = slab;
buf->start = i * mgr->bufSize;
buf->mapCount = 0;
pipe_condvar_init(buf->event);
LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
slab->numFree++;
buf++;
}
/* Add this slab to the list of partial slabs */
LIST_ADDTAIL(&slab->head, &mgr->slabs);
return PIPE_OK;
out_err1:
pb_reference(&slab->bo, NULL);
out_err0:
FREE(slab);
return ret;
}
static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
pb_size size,
const struct pb_desc *desc)
{
struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
struct pb_slab_buffer *buf;
struct pb_slab *slab;
struct list_head *list;
/* check size */
assert(size <= mgr->bufSize);
if(size > mgr->bufSize)
return NULL;
/* check if we can provide the requested alignment */
assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
return NULL;
assert(pb_check_alignment(desc->alignment, mgr->bufSize));
if(!pb_check_alignment(desc->alignment, mgr->bufSize))
return NULL;
assert(pb_check_usage(desc->usage, mgr->desc.usage));
if(!pb_check_usage(desc->usage, mgr->desc.usage))
return NULL;
pipe_mutex_lock(mgr->mutex);
/* Create a new slab, if we run out of partial slabs */
if (mgr->slabs.next == &mgr->slabs) {
(void) pb_slab_create(mgr);
if (mgr->slabs.next == &mgr->slabs) {
pipe_mutex_unlock(mgr->mutex);
return NULL;
}
}
/* Allocate the buffer from a partial (or just created) slab */
list = mgr->slabs.next;
slab = LIST_ENTRY(struct pb_slab, list, head);
/* If totally full remove from the partial slab list */
if (--slab->numFree == 0)
LIST_DELINIT(list);
list = slab->freeBuffers.next;
LIST_DELINIT(list);
pipe_mutex_unlock(mgr->mutex);
buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
pipe_reference_init(&buf->base.reference, 1);
buf->base.alignment = desc->alignment;
buf->base.usage = desc->usage;
return &buf->base;
}
static void
pb_slab_manager_flush(struct pb_manager *_mgr)
{
struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
assert(mgr->provider->flush);
if(mgr->provider->flush)
mgr->provider->flush(mgr->provider);
}
static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
/* TODO: cleanup all allocated buffers */
FREE(mgr);
}
struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
pb_size bufSize,
pb_size slabSize,
const struct pb_desc *desc)
{
struct pb_slab_manager *mgr;
mgr = CALLOC_STRUCT(pb_slab_manager);
if (!mgr)
return NULL;
mgr->base.destroy = pb_slab_manager_destroy;
mgr->base.create_buffer = pb_slab_manager_create_buffer;
mgr->base.flush = pb_slab_manager_flush;
mgr->provider = provider;
mgr->bufSize = bufSize;
mgr->slabSize = slabSize;
mgr->desc = *desc;
LIST_INITHEAD(&mgr->slabs);
pipe_mutex_init(mgr->mutex);
return &mgr->base;
}
static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
pb_size size,
const struct pb_desc *desc)
{
struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
pb_size bufSize;
pb_size reqSize = size;
unsigned i;
if(desc->alignment > reqSize)
reqSize = desc->alignment;
bufSize = mgr->minBufSize;
for (i = 0; i < mgr->numBuckets; ++i) {
if(bufSize >= reqSize)
return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
bufSize *= 2;
}
/* Fall back to allocate a buffer object directly from the provider. */
return mgr->provider->create_buffer(mgr->provider, size, desc);
}
static void
pb_slab_range_manager_flush(struct pb_manager *_mgr)
{
struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
/* The individual slab managers don't hold any buffers pending flush, so there is no need to flush them */
assert(mgr->provider->flush);
if(mgr->provider->flush)
mgr->provider->flush(mgr->provider);
}
static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
unsigned i;
for (i = 0; i < mgr->numBuckets; ++i)
mgr->buckets[i]->destroy(mgr->buckets[i]);
FREE(mgr->buckets);
FREE(mgr->bucketSizes);
FREE(mgr);
}
struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
pb_size minBufSize,
pb_size maxBufSize,
pb_size slabSize,
const struct pb_desc *desc)
{
struct pb_slab_range_manager *mgr;
pb_size bufSize;
unsigned i;
if(!provider)
return NULL;
mgr = CALLOC_STRUCT(pb_slab_range_manager);
if (!mgr)
goto out_err0;
mgr->base.destroy = pb_slab_range_manager_destroy;
mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
mgr->base.flush = pb_slab_range_manager_flush;
mgr->provider = provider;
mgr->minBufSize = minBufSize;
mgr->maxBufSize = maxBufSize;
mgr->numBuckets = 1;
bufSize = minBufSize;
while(bufSize < maxBufSize) {
bufSize *= 2;
++mgr->numBuckets;
}
mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
if (!mgr->buckets)
goto out_err1;
bufSize = minBufSize;
for (i = 0; i < mgr->numBuckets; ++i) {
mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
if(!mgr->buckets[i])
goto out_err2;
bufSize *= 2;
}
return &mgr->base;
out_err2:
for (i = 0; i < mgr->numBuckets; ++i)
if(mgr->buckets[i])
mgr->buckets[i]->destroy(mgr->buckets[i]);
FREE(mgr->buckets);
out_err1:
FREE(mgr);
out_err0:
return NULL;
}

View File

@ -0,0 +1,192 @@
/**************************************************************************
*
* Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* @file
* Buffer validation.
*
* @author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/u_debug.h"
#include "pb_buffer.h"
#include "pb_validate.h"
#define PB_VALIDATE_INITIAL_SIZE 1 /* 512 */
struct pb_validate_entry
{
struct pb_buffer *buf;
unsigned flags;
};
struct pb_validate
{
struct pb_validate_entry *entries;
unsigned used;
unsigned size;
};
enum pipe_error
pb_validate_add_buffer(struct pb_validate *vl,
struct pb_buffer *buf,
unsigned flags)
{
assert(buf);
if(!buf)
return PIPE_ERROR;
assert(flags & PB_USAGE_GPU_READ_WRITE);
assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
flags &= PB_USAGE_GPU_READ_WRITE;
/* We only need to store one reference for each buffer, so avoid storing
* consecutive references for the same buffer. It might not be the most
* common pattern, but it is easy to implement.
*/
if(vl->used && vl->entries[vl->used - 1].buf == buf) {
vl->entries[vl->used - 1].flags |= flags;
return PIPE_OK;
}
/* Grow the table */
if(vl->used == vl->size) {
unsigned new_size;
struct pb_validate_entry *new_entries;
new_size = vl->size * 2;
if(!new_size)
return PIPE_ERROR_OUT_OF_MEMORY;
new_entries = (struct pb_validate_entry *)REALLOC(vl->entries,
vl->size*sizeof(struct pb_validate_entry),
new_size*sizeof(struct pb_validate_entry));
if(!new_entries)
return PIPE_ERROR_OUT_OF_MEMORY;
memset(new_entries + vl->size, 0, (new_size - vl->size)*sizeof(struct pb_validate_entry));
vl->size = new_size;
vl->entries = new_entries;
}
assert(!vl->entries[vl->used].buf);
pb_reference(&vl->entries[vl->used].buf, buf);
vl->entries[vl->used].flags = flags;
++vl->used;
return PIPE_OK;
}
enum pipe_error
pb_validate_foreach(struct pb_validate *vl,
enum pipe_error (*callback)(struct pb_buffer *buf, void *data),
void *data)
{
unsigned i;
for(i = 0; i < vl->used; ++i) {
enum pipe_error ret;
ret = callback(vl->entries[i].buf, data);
if(ret != PIPE_OK)
return ret;
}
return PIPE_OK;
}
enum pipe_error
pb_validate_validate(struct pb_validate *vl)
{
unsigned i;
for(i = 0; i < vl->used; ++i) {
enum pipe_error ret;
ret = pb_validate(vl->entries[i].buf, vl, vl->entries[i].flags);
if(ret != PIPE_OK) {
while(i--)
pb_validate(vl->entries[i].buf, NULL, 0);
return ret;
}
}
return PIPE_OK;
}
void
pb_validate_fence(struct pb_validate *vl,
struct pipe_fence_handle *fence)
{
unsigned i;
for(i = 0; i < vl->used; ++i) {
pb_fence(vl->entries[i].buf, fence);
pb_reference(&vl->entries[i].buf, NULL);
}
vl->used = 0;
}
void
pb_validate_destroy(struct pb_validate *vl)
{
unsigned i;
for(i = 0; i < vl->used; ++i)
pb_reference(&vl->entries[i].buf, NULL);
FREE(vl->entries);
FREE(vl);
}
struct pb_validate *
pb_validate_create(void)
{
struct pb_validate *vl;
vl = CALLOC_STRUCT(pb_validate);
if(!vl)
return NULL;
vl->size = PB_VALIDATE_INITIAL_SIZE;
vl->entries = (struct pb_validate_entry *)CALLOC(vl->size, sizeof(struct pb_validate_entry));
if(!vl->entries) {
FREE(vl);
return NULL;
}
return vl;
}
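/* Usage sketch (illustration only; 'buf' and 'fence' are assumed to come
 * from the caller's winsys): add buffers, validate right before emitting
 * commands, fence right after.
 */
#if 0
static enum pipe_error
submit_with_validation(struct pb_buffer *buf, struct pipe_fence_handle *fence)
{
   struct pb_validate *vl = pb_validate_create();
   enum pipe_error ret;
   if (!vl)
      return PIPE_ERROR_OUT_OF_MEMORY;
   ret = pb_validate_add_buffer(vl, buf, PB_USAGE_GPU_READ);
   if (ret == PIPE_OK)
      ret = pb_validate_validate(vl);   /* right before issuing commands */
   if (ret == PIPE_OK) {
      /* ... emit hardware commands referencing buf ... */
      pb_validate_fence(vl, fence);     /* right after issuing commands */
   }
   pb_validate_destroy(vl);
   return ret;
}
#endif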

View File

@ -0,0 +1,97 @@
/**************************************************************************
*
* Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* @file
* Buffer validation.
*
* @author Jose Fonseca <jrfonseca@tungstengraphics.com>
*/
#ifndef PB_VALIDATE_H_
#define PB_VALIDATE_H_
#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#ifdef __cplusplus
extern "C" {
#endif
struct pb_buffer;
struct pipe_fence_handle;
/**
* Buffer validation list.
*
* It holds a list of buffers to be validated and fenced when flushing.
*/
struct pb_validate;
enum pipe_error
pb_validate_add_buffer(struct pb_validate *vl,
struct pb_buffer *buf,
unsigned flags);
enum pipe_error
pb_validate_foreach(struct pb_validate *vl,
enum pipe_error (*callback)(struct pb_buffer *buf, void *data),
void *data);
/**
* Validate all buffers for hardware access.
*
* Should be called right before issuing commands to the hardware.
*/
enum pipe_error
pb_validate_validate(struct pb_validate *vl);
/**
* Fence all buffers and clear the list.
*
* Should be called right after issuing commands to the hardware.
*/
void
pb_validate_fence(struct pb_validate *vl,
struct pipe_fence_handle *fence);
struct pb_validate *
pb_validate_create(void);
void
pb_validate_destroy(struct pb_validate *vl);
#ifdef __cplusplus
}
#endif
#endif /*PB_VALIDATE_H_*/

View File

@ -0,0 +1,67 @@
/**************************************************************************
*
* Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "pipe/p_config.h"
#include "rtasm_cpu.h"
#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
#include "util/u_debug.h"
#include "util/u_cpu_detect.h"
DEBUG_GET_ONCE_BOOL_OPTION(nosse, "GALLIUM_NOSSE", FALSE);
static struct util_cpu_caps *get_cpu_caps(void)
{
util_cpu_detect();
return &util_cpu_caps;
}
int rtasm_cpu_has_sse(void)
{
return !debug_get_option_nosse() && get_cpu_caps()->has_sse;
}
int rtasm_cpu_has_sse2(void)
{
return !debug_get_option_nosse() && get_cpu_caps()->has_sse2;
}
#else
int rtasm_cpu_has_sse(void)
{
return 0;
}
int rtasm_cpu_has_sse2(void)
{
return 0;
}
#endif

View File

@ -0,0 +1,42 @@
/**************************************************************************
*
* Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* @file
* Runtime detection of CPU capabilities.
*/
#ifndef _RTASM_CPU_H_
#define _RTASM_CPU_H_
int rtasm_cpu_has_sse(void);
int rtasm_cpu_has_sse2(void);
#endif /* _RTASM_CPU_H_ */
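/* Typical use (a sketch): gate an SSE fast path on the runtime check, which
 * also honours the GALLIUM_NOSSE environment override. Both path functions
 * below are hypothetical placeholders.
 */
#if 0
   if (rtasm_cpu_has_sse2())
      run_sse2_path();       /* hypothetical SSE2 implementation */
   else
      run_generic_path();    /* hypothetical fallback */
#endif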

View File

@ -0,0 +1,102 @@
/**************************************************************************
*
* Copyright (C) 1999-2005 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* \file rtasm_execmem.c
* Functions for allocating executable memory.
*
* \author Keith Whitwell
*/
#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "rtasm_execmem.h"
#include "util/u_mm.h"
#define EXEC_HEAP_SIZE (4*1024*1024)
pipe_static_mutex(exec_mutex);
static struct mem_block *exec_heap = NULL;
static unsigned char *exec_mem = NULL;
static void
init_heap(void)
{
if (!exec_heap)
exec_heap = u_mmInit( 0, EXEC_HEAP_SIZE );
if (!exec_mem)
exec_mem = (unsigned char *) user_alloc(EXEC_HEAP_SIZE);
}
void *
rtasm_exec_malloc(size_t size)
{
struct mem_block *block = NULL;
void *addr = NULL;
pipe_mutex_lock(exec_mutex);
init_heap();
if (exec_heap) {
size = (size + 31) & ~31; /* next multiple of 32 bytes */
block = u_mmAllocMem( exec_heap, size, 5, 0 ); /* 5 -> 32-byte alignment */
}
if (block)
addr = exec_mem + block->ofs;
else
debug_printf("rtasm_exec_malloc failed\n");
pipe_mutex_unlock(exec_mutex);
return addr;
}
void
rtasm_exec_free(void *addr)
{
pipe_mutex_lock(exec_mutex);
if (exec_heap) {
struct mem_block *block = u_mmFindBlock(exec_heap, (unsigned char *)addr - exec_mem);
if (block)
u_mmFreeMem(block);
}
pipe_mutex_unlock(exec_mutex);
}
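/* Usage sketch (illustration only, assuming an x86 host): copy generated
 * machine code into executable memory and call it. A single 0xc3 byte is a
 * bare 'ret', i.e. the smallest valid no-argument function:
 */
#if 0
   static const unsigned char code[] = { 0xc3 };   /* ret */
   void (*fn)(void);
   void *mem = rtasm_exec_malloc(sizeof(code));
   if (mem) {
      memcpy(mem, code, sizeof(code));
      fn = (void (*)(void)) mem;
      fn();
      rtasm_exec_free(mem);
   }
#endif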

View File

@ -0,0 +1,46 @@
/**************************************************************************
*
* Copyright (C) 1999-2005 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* \file rtasm_execmem.h
* Functions for allocating executable memory.
*
* \author Keith Whitwell
*/
#ifndef _RTASM_EXECMEM_H_
#define _RTASM_EXECMEM_H_
#include "pipe/p_compiler.h"
extern void *
rtasm_exec_malloc( size_t size );
extern void
rtasm_exec_free( void *addr );
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,416 @@
/**************************************************************************
*
* Copyright (C) 1999-2005 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _RTASM_X86SSE_H_
#define _RTASM_X86SSE_H_
#include "pipe/p_compiler.h"
#include "pipe/p_config.h"
#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
/* It is up to the caller to ensure that instructions issued are
* suitable for the host cpu. There are no checks made in this module
* for mmx/sse/sse2 support on the cpu.
*/
struct x86_reg {
unsigned file:2;
unsigned idx:4;
unsigned mod:2; /* mod_REG if this is just a register */
int disp:24; /* only +/- 23bits of offset - should be enough... */
};
#define X86_MMX 1
#define X86_MMX2 2
#define X86_SSE 4
#define X86_SSE2 8
#define X86_SSE3 0x10
#define X86_SSE4_1 0x20
struct x86_function {
unsigned caps;
unsigned size;
unsigned char *store;
unsigned char *csr;
unsigned stack_offset:16;
unsigned need_emms:8;
int x87_stack:8;
unsigned char error_overflow[4];
};
enum x86_reg_file {
file_REG32,
file_MMX,
file_XMM,
file_x87
};
/* Values for mod field of modr/m byte
*/
enum x86_reg_mod {
mod_INDIRECT,
mod_DISP8,
mod_DISP32,
mod_REG
};
enum x86_reg_name {
reg_AX,
reg_CX,
reg_DX,
reg_BX,
reg_SP,
reg_BP,
reg_SI,
reg_DI,
reg_R8,
reg_R9,
reg_R10,
reg_R11,
reg_R12,
reg_R13,
reg_R14,
reg_R15
};
enum x86_cc {
cc_O, /* overflow */
cc_NO, /* not overflow */
cc_NAE, /* not above or equal / carry */
cc_AE, /* above or equal / not carry */
cc_E, /* equal / zero */
cc_NE /* not equal / not zero */
};
enum sse_cc {
cc_Equal,
cc_LessThan,
cc_LessThanEqual,
cc_Unordered,
cc_NotEqual,
cc_NotLessThan,
cc_NotLessThanEqual,
cc_Ordered
};
#define cc_Z cc_E
#define cc_NZ cc_NE
/** generic pointer to function */
typedef void (*x86_func)(void);
/* Begin/end/retrieve function creation:
*/
enum x86_target
{
X86_32,
X86_64_STD_ABI,
X86_64_WIN64_ABI
};
/* Make this read a member of x86_function if generating for a target other than the host is desired. */
static INLINE enum x86_target x86_target( struct x86_function* p )
{
#ifdef PIPE_ARCH_X86
return X86_32;
#elif defined(_WIN64)
return X86_64_WIN64_ABI;
#elif defined(PIPE_ARCH_X86_64)
return X86_64_STD_ABI;
#endif
}
static INLINE unsigned x86_target_caps( struct x86_function* p )
{
return p->caps;
}
void x86_init_func( struct x86_function *p );
void x86_init_func_size( struct x86_function *p, unsigned code_size );
void x86_release_func( struct x86_function *p );
x86_func x86_get_func( struct x86_function *p );
/* Debugging:
*/
void x86_print_reg( struct x86_reg reg );
/* Create and manipulate registers and regmem values:
*/
struct x86_reg x86_make_reg( enum x86_reg_file file,
enum x86_reg_name idx );
struct x86_reg x86_make_disp( struct x86_reg reg,
int disp );
struct x86_reg x86_deref( struct x86_reg reg );
struct x86_reg x86_get_base_reg( struct x86_reg reg );
/* Labels, jumps and fixup:
*/
int x86_get_label( struct x86_function *p );
void x64_rexw(struct x86_function *p);
void x86_jcc( struct x86_function *p,
enum x86_cc cc,
int label );
int x86_jcc_forward( struct x86_function *p,
enum x86_cc cc );
int x86_jmp_forward( struct x86_function *p);
int x86_call_forward( struct x86_function *p);
void x86_fixup_fwd_jump( struct x86_function *p,
int fixup );
void x86_jmp( struct x86_function *p, int label );
/* void x86_call( struct x86_function *p, void (*label)() ); */
void x86_call( struct x86_function *p, struct x86_reg reg);
void x86_mov_reg_imm( struct x86_function *p, struct x86_reg dst, int imm );
void x86_add_imm( struct x86_function *p, struct x86_reg dst, int imm );
void x86_or_imm( struct x86_function *p, struct x86_reg dst, int imm );
void x86_and_imm( struct x86_function *p, struct x86_reg dst, int imm );
void x86_sub_imm( struct x86_function *p, struct x86_reg dst, int imm );
void x86_xor_imm( struct x86_function *p, struct x86_reg dst, int imm );
void x86_cmp_imm( struct x86_function *p, struct x86_reg dst, int imm );
/* Macro for sse_shufps() and sse2_pshufd():
*/
#define SHUF(_x,_y,_z,_w) (((_x)<<0) | ((_y)<<2) | ((_z)<<4) | ((_w)<<6))
#define SHUF_NOOP SHUF(0,1,2,3)
#define GET_SHUF(swz, idx) (((swz) >> ((idx)*2)) & 0x3)
void mmx_emms( struct x86_function *p );
void mmx_movd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void mmx_movq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void mmx_packssdw( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void mmx_packuswb( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movdqu( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movdqa( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movsd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movupd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movapd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_cvtps2dq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_cvttps2dq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_cvtdq2ps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_cvtsd2ss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_cvtpd2ps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_packssdw( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_packsswb( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_packuswb( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_pshufd( struct x86_function *p, struct x86_reg dest, struct x86_reg arg0,
unsigned char shuf );
void sse2_pshuflw( struct x86_function *p, struct x86_reg dest, struct x86_reg arg0,
unsigned char shuf );
void sse2_pshufhw( struct x86_function *p, struct x86_reg dest, struct x86_reg arg0,
unsigned char shuf );
void sse2_rcpps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_rcpss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_punpcklbw( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_punpcklwd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_punpckldq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_punpcklqdq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_psllw_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_pslld_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_psllq_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_psrlw_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_psrld_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_psrlq_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_psraw_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_psrad_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_por( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_prefetchnta( struct x86_function *p, struct x86_reg ptr);
void sse_prefetch0( struct x86_function *p, struct x86_reg ptr);
void sse_prefetch1( struct x86_function *p, struct x86_reg ptr);
void sse_movntps( struct x86_function *p, struct x86_reg dst, struct x86_reg src);
void sse_addps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_addss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_cvtps2pi( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_divss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_andnps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_andps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_cmpps( struct x86_function *p, struct x86_reg dst, struct x86_reg src,
enum sse_cc cc );
void sse_maxps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_maxss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_minps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_movaps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_movhlps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_movhps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_movlhps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_movlps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_movss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_movups( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_mulps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_mulss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_orps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_xorps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_subps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_rsqrtps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_rsqrtss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_shufps( struct x86_function *p, struct x86_reg dest, struct x86_reg arg0,
unsigned char shuf );
void sse_unpckhps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_unpcklps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_pmovmskb( struct x86_function *p, struct x86_reg dest, struct x86_reg src );
void sse_movmskps( struct x86_function *p, struct x86_reg dst, struct x86_reg src);
void x86_add( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_and( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_cmovcc( struct x86_function *p, struct x86_reg dst, struct x86_reg src, enum x86_cc cc );
void x86_cmp( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_dec( struct x86_function *p, struct x86_reg reg );
void x86_inc( struct x86_function *p, struct x86_reg reg );
void x86_lea( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_mov( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x64_mov64( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_mov8( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_mov16( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_movzx8(struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_movzx16(struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_mov_imm(struct x86_function *p, struct x86_reg dst, int imm );
void x86_mov8_imm(struct x86_function *p, struct x86_reg dst, uint8_t imm );
void x86_mov16_imm(struct x86_function *p, struct x86_reg dst, uint16_t imm );
void x86_mul( struct x86_function *p, struct x86_reg src );
void x86_imul( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_or( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_pop( struct x86_function *p, struct x86_reg reg );
void x86_push( struct x86_function *p, struct x86_reg reg );
void x86_push_imm32( struct x86_function *p, int imm );
void x86_ret( struct x86_function *p );
void x86_retw( struct x86_function *p, unsigned short imm );
void x86_sub( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_test( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_xor( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_sahf( struct x86_function *p );
void x86_div( struct x86_function *p, struct x86_reg src );
void x86_bswap( struct x86_function *p, struct x86_reg src );
void x86_shr_imm( struct x86_function *p, struct x86_reg reg, unsigned imm );
void x86_sar_imm( struct x86_function *p, struct x86_reg reg, unsigned imm );
void x86_shl_imm( struct x86_function *p, struct x86_reg reg, unsigned imm );
void x86_cdecl_caller_push_regs( struct x86_function *p );
void x86_cdecl_caller_pop_regs( struct x86_function *p );
void x87_assert_stack_empty( struct x86_function *p );
void x87_f2xm1( struct x86_function *p );
void x87_fabs( struct x86_function *p );
void x87_fadd( struct x86_function *p, struct x86_reg dst, struct x86_reg arg );
void x87_faddp( struct x86_function *p, struct x86_reg dst );
void x87_fchs( struct x86_function *p );
void x87_fclex( struct x86_function *p );
void x87_fcmovb( struct x86_function *p, struct x86_reg src );
void x87_fcmovbe( struct x86_function *p, struct x86_reg src );
void x87_fcmove( struct x86_function *p, struct x86_reg src );
void x87_fcmovnb( struct x86_function *p, struct x86_reg src );
void x87_fcmovnbe( struct x86_function *p, struct x86_reg src );
void x87_fcmovne( struct x86_function *p, struct x86_reg src );
void x87_fcom( struct x86_function *p, struct x86_reg dst );
void x87_fcomi( struct x86_function *p, struct x86_reg dst );
void x87_fcomip( struct x86_function *p, struct x86_reg dst );
void x87_fcomp( struct x86_function *p, struct x86_reg dst );
void x87_fcos( struct x86_function *p );
void x87_fdiv( struct x86_function *p, struct x86_reg dst, struct x86_reg arg );
void x87_fdivp( struct x86_function *p, struct x86_reg dst );
void x87_fdivr( struct x86_function *p, struct x86_reg dst, struct x86_reg arg );
void x87_fdivrp( struct x86_function *p, struct x86_reg dst );
void x87_fild( struct x86_function *p, struct x86_reg arg );
void x87_fist( struct x86_function *p, struct x86_reg dst );
void x87_fistp( struct x86_function *p, struct x86_reg dst );
void x87_fld( struct x86_function *p, struct x86_reg arg );
void x87_fld1( struct x86_function *p );
void x87_fldcw( struct x86_function *p, struct x86_reg arg );
void x87_fldl2e( struct x86_function *p );
void x87_fldln2( struct x86_function *p );
void x87_fldz( struct x86_function *p );
void x87_fmul( struct x86_function *p, struct x86_reg dst, struct x86_reg arg );
void x87_fmulp( struct x86_function *p, struct x86_reg dst );
void x87_fnclex( struct x86_function *p );
void x87_fprndint( struct x86_function *p );
void x87_fpop( struct x86_function *p );
void x87_fscale( struct x86_function *p );
void x87_fsin( struct x86_function *p );
void x87_fsincos( struct x86_function *p );
void x87_fsqrt( struct x86_function *p );
void x87_fst( struct x86_function *p, struct x86_reg dst );
void x87_fstp( struct x86_function *p, struct x86_reg dst );
void x87_fsub( struct x86_function *p, struct x86_reg dst, struct x86_reg arg );
void x87_fsubp( struct x86_function *p, struct x86_reg dst );
void x87_fsubr( struct x86_function *p, struct x86_reg dst, struct x86_reg arg );
void x87_fsubrp( struct x86_function *p, struct x86_reg dst );
void x87_ftst( struct x86_function *p );
void x87_fxch( struct x86_function *p, struct x86_reg dst );
void x87_fxtract( struct x86_function *p );
void x87_fyl2x( struct x86_function *p );
void x87_fyl2xp1( struct x86_function *p );
void x87_fwait( struct x86_function *p );
void x87_fnstcw( struct x86_function *p, struct x86_reg dst );
void x87_fnstsw( struct x86_function *p, struct x86_reg dst );
void x87_fucompp( struct x86_function *p );
void x87_fucomp( struct x86_function *p, struct x86_reg arg );
void x87_fucom( struct x86_function *p, struct x86_reg arg );
/* Retrieve a reference to one of the function arguments, taking into
* account any push/pop activity. Note - doesn't track explicit
* manipulation of ESP by other instructions.
*/
struct x86_reg x86_fn_arg( struct x86_function *p, unsigned arg );
#endif
#endif
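/* Sketch of the function-creation workflow declared above (illustration
 * only): init, emit instructions, retrieve the callable pointer, release.
 */
#if 0
   struct x86_function f;
   x86_func fn;
   x86_init_func(&f);
   x86_ret(&f);                  /* smallest possible function: just return */
   fn = x86_get_func(&f);
   if (fn)
      fn();
   x86_release_func(&f);
#endif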

View File

@ -0,0 +1,55 @@
/**************************************************************************
*
* Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
*/
#include "pipe/p_config.h"
#include "pipe/p_state.h"
#include "translate.h"
struct translate *translate_create( const struct translate_key *key )
{
struct translate *translate = NULL;
#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
translate = translate_sse2_create( key );
if (translate)
return translate;
#else
(void)translate;
#endif
return translate_generic_create( key );
}
boolean translate_is_output_format_supported(enum pipe_format format)
{
return translate_generic_is_output_format_supported(format);
}

View File

@ -0,0 +1,160 @@
/*
* Copyright 2008 Tungsten Graphics, inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* Vertex fetch/store/convert code. This functionality is used in two places:
* 1. Vertex fetch/convert - to grab vertex data from incoming vertex
* arrays and convert to format needed by vertex shaders.
* 2. Vertex store/emit - to convert simple float[][4] vertex attributes
* (which is the organization used throughout the draw/prim pipeline) to
* hardware-specific formats and emit into hardware vertex buffers.
*
*
* Authors:
* Keith Whitwell <keithw@tungstengraphics.com>
*/
#ifndef _TRANSLATE_H
#define _TRANSLATE_H
#include "pipe/p_compiler.h"
#include "pipe/p_format.h"
#include "pipe/p_state.h"
enum translate_element_type {
TRANSLATE_ELEMENT_NORMAL,
TRANSLATE_ELEMENT_INSTANCE_ID
};
struct translate_element
{
enum translate_element_type type;
enum pipe_format input_format;
enum pipe_format output_format;
unsigned input_buffer:8;
unsigned input_offset:24;
unsigned instance_divisor;
unsigned output_offset;
};
struct translate_key {
unsigned output_stride;
unsigned nr_elements;
struct translate_element element[PIPE_MAX_ATTRIBS + 1];
};
struct translate;
typedef void (PIPE_CDECL *run_elts_func)(struct translate *,
const unsigned *elts,
unsigned count,
unsigned start_instance,
unsigned instance_id,
void *output_buffer);
typedef void (PIPE_CDECL *run_elts16_func)(struct translate *,
const uint16_t *elts,
unsigned count,
unsigned start_instance,
unsigned instance_id,
void *output_buffer);
typedef void (PIPE_CDECL *run_elts8_func)(struct translate *,
const uint8_t *elts,
unsigned count,
unsigned start_instance,
unsigned instance_id,
void *output_buffer);
typedef void (PIPE_CDECL *run_func)(struct translate *,
unsigned start,
unsigned count,
unsigned start_instance,
unsigned instance_id,
void *output_buffer);
struct translate {
struct translate_key key;
void (*release)( struct translate * );
void (*set_buffer)( struct translate *,
unsigned i,
const void *ptr,
unsigned stride,
unsigned max_index );
run_elts_func run_elts;
run_elts16_func run_elts16;
run_elts8_func run_elts8;
run_func run;
};
struct translate *translate_create( const struct translate_key *key );
boolean translate_is_output_format_supported(enum pipe_format format);
static INLINE int translate_keysize( const struct translate_key *key )
{
return 2 * sizeof(int) + key->nr_elements * sizeof(struct translate_element);
}
static INLINE int translate_key_compare( const struct translate_key *a,
const struct translate_key *b )
{
int keysize_a = translate_keysize(a);
int keysize_b = translate_keysize(b);
if (keysize_a != keysize_b) {
return keysize_a - keysize_b;
}
return memcmp(a, b, keysize_a);
}
static INLINE void translate_key_sanitize( struct translate_key *a )
{
int keysize = translate_keysize(a);
char *ptr = (char *)a;
memset(ptr + keysize, 0, sizeof(*a) - keysize);
}
/*******************************************************************************
* Private:
*/
struct translate *translate_sse2_create( const struct translate_key *key );
struct translate *translate_generic_create( const struct translate_key *key );
boolean translate_generic_is_output_format_supported(enum pipe_format format);
#endif
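/* A minimal usage sketch (illustration only; 'verts' and 'out' are
 * hypothetical): expand packed 3-float positions to the float[4] layout
 * used by the draw pipeline.
 */
#if 0
   struct translate_key key;
   struct translate *t;
   float verts[4][3];                 /* 4 input vertices, 3 floats each */
   float out[4][4];                   /* output: one float[4] per vertex */

   memset(&key, 0, sizeof(key));
   key.output_stride = 4 * sizeof(float);
   key.nr_elements = 1;
   key.element[0].type = TRANSLATE_ELEMENT_NORMAL;
   key.element[0].input_format = PIPE_FORMAT_R32G32B32_FLOAT;
   key.element[0].output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   key.element[0].input_buffer = 0;
   key.element[0].input_offset = 0;
   key.element[0].instance_divisor = 0;
   key.element[0].output_offset = 0;

   t = translate_create(&key);
   if (t) {
      t->set_buffer(t, 0, verts, 3 * sizeof(float), 3 /* max_index */);
      t->run(t, 0 /* start */, 4 /* count */, 0, 0, out);
      t->release(t);
   }
#endif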

View File

@ -0,0 +1,106 @@
/**************************************************************************
*
* Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "util/u_memory.h"
#include "pipe/p_state.h"
#include "translate.h"
#include "translate_cache.h"
#include "cso_cache/cso_cache.h"
#include "cso_cache/cso_hash.h"
struct translate_cache {
struct cso_hash *hash;
};
struct translate_cache * translate_cache_create( void )
{
struct translate_cache *cache = MALLOC_STRUCT(translate_cache);
if (cache == NULL) {
return NULL;
}
cache->hash = cso_hash_create();
return cache;
}
static INLINE void delete_translates(struct translate_cache *cache)
{
struct cso_hash *hash = cache->hash;
struct cso_hash_iter iter = cso_hash_first_node(hash);
while (!cso_hash_iter_is_null(iter)) {
struct translate *state = (struct translate*)cso_hash_iter_data(iter);
iter = cso_hash_iter_next(iter);
if (state) {
state->release(state);
}
}
}
void translate_cache_destroy(struct translate_cache *cache)
{
delete_translates(cache);
cso_hash_delete(cache->hash);
FREE(cache);
}
static INLINE unsigned translate_hash_key_size(struct translate_key *key)
{
unsigned size = sizeof(struct translate_key) -
sizeof(struct translate_element) * (PIPE_MAX_ATTRIBS - key->nr_elements);
return size;
}
static INLINE unsigned create_key(struct translate_key *key)
{
unsigned hash_key;
unsigned size = translate_hash_key_size(key);
/*debug_printf("key size = %d, (els = %d)\n",
size, key->nr_elements);*/
hash_key = cso_construct_key(key, size);
return hash_key;
}
struct translate * translate_cache_find(struct translate_cache *cache,
struct translate_key *key)
{
unsigned hash_key = create_key(key);
struct translate *translate = (struct translate*)
cso_hash_find_data_from_template(cache->hash,
hash_key,
key, sizeof(*key));
if (!translate) {
/* create/insert */
translate = translate_create(key);
cso_hash_insert(cache->hash, hash_key, translate);
}
return translate;
}

View File

@ -0,0 +1,54 @@
/*
* Copyright 2008 Tungsten Graphics, inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _TRANSLATE_CACHE_H
#define _TRANSLATE_CACHE_H
/*******************************************************************************
* Translate cache.
* Simply used to cache created translates. Avoids unnecessary creation of
* translates if one suitable for a given translate_key has already been
* created.
*
* Note: this functionality depends on the CSO module.
*/
struct translate_cache;
struct translate_key;
struct translate;
struct translate_cache *translate_cache_create( void );
void translate_cache_destroy(struct translate_cache *cache);
/**
* Tries to find a translate structure matching the given key.
* If no such structure exists in the cache, the function
* creates it, inserts it in the cache, and returns the new
* instance.
*/
struct translate *translate_cache_find(struct translate_cache *cache,
struct translate_key *key);
#endif
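/* Usage sketch (illustration only; 'key' is a filled-in translate_key):
 * the cache hands back an existing translate for an equal key, creating
 * one on the first request.
 */
#if 0
   struct translate_cache *cache = translate_cache_create();
   struct translate *t;
   if (cache) {
      translate_key_sanitize(&key);   /* zero the unused tail so hashing/compare are stable */
      t = translate_cache_find(cache, &key);
      /* ... use t; the cache owns it, so don't call t->release() ... */
      translate_cache_destroy(cache);
   }
#endif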

View File

@ -0,0 +1,998 @@
/**************************************************************************
*
* Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
*/
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_half.h"
#include "util/u_math.h"
#include "pipe/p_state.h"
#include "translate.h"
#define DRAW_DBG 0
typedef void (*fetch_func)(void *dst,
const uint8_t *src,
unsigned i, unsigned j);
typedef void (*emit_func)(const void *attrib, void *ptr);
struct translate_generic {
struct translate translate;
struct {
enum translate_element_type type;
fetch_func fetch;
unsigned buffer;
unsigned input_offset;
unsigned instance_divisor;
emit_func emit;
unsigned output_offset;
const uint8_t *input_ptr;
unsigned input_stride;
unsigned max_index;
/* this value is set to -1 if this is a normal element with output_format != input_format:
* in this case, u_format is used to do a full conversion
*
* this value is set to the format size in bytes if output_format == input_format or for 32-bit instance ids:
* in this case, memcpy is used to copy this number of bytes
*/
int copy_size;
} attrib[PIPE_MAX_ATTRIBS];
unsigned nr_attrib;
};
static struct translate_generic *translate_generic( struct translate *translate )
{
return (struct translate_generic *)translate;
}
/**
* Fetch a dword[4] vertex attribute from memory, doing format/type
* conversion as needed.
*
* This is probably needed/duplicated elsewhere, e.g. format
* conversion, texture sampling etc.
*/
#define ATTRIB( NAME, SZ, SRCTYPE, DSTTYPE, TO ) \
static void \
emit_##NAME(const void *attrib, void *ptr) \
{ \
unsigned i; \
SRCTYPE *in = (SRCTYPE *)attrib; \
DSTTYPE *out = (DSTTYPE *)ptr; \
\
for (i = 0; i < SZ; i++) { \
out[i] = TO(in[i]); \
} \
}
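/* For illustration, ATTRIB( R32G32_FLOAT, 2, float, float, TO_32_FLOAT )
 * expands to an emit_R32G32_FLOAT() that copies two floats from the
 * generic float[4] attribute into the output vertex, applying the
 * identity conversion TO_32_FLOAT.
 */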
#define TO_64_FLOAT(x) ((double) x)
#define TO_32_FLOAT(x) (x)
#define TO_16_FLOAT(x) util_float_to_half(x)
#define TO_8_USCALED(x) ((unsigned char) x)
#define TO_16_USCALED(x) ((unsigned short) x)
#define TO_32_USCALED(x) ((unsigned int) x)
#define TO_8_SSCALED(x) ((char) x)
#define TO_16_SSCALED(x) ((short) x)
#define TO_32_SSCALED(x) ((int) x)
#define TO_8_UNORM(x) ((unsigned char) (x * 255.0f))
#define TO_16_UNORM(x) ((unsigned short) (x * 65535.0f))
#define TO_32_UNORM(x) ((unsigned int) (x * 4294967295.0f))
#define TO_8_SNORM(x) ((char) (x * 127.0f))
#define TO_16_SNORM(x) ((short) (x * 32767.0f))
#define TO_32_SNORM(x) ((int) (x * 2147483647.0f))
#define TO_32_FIXED(x) ((int) (x * 65536.0f))
#define TO_INT(x) (x)
ATTRIB( R64G64B64A64_FLOAT, 4, float, double, TO_64_FLOAT )
ATTRIB( R64G64B64_FLOAT, 3, float, double, TO_64_FLOAT )
ATTRIB( R64G64_FLOAT, 2, float, double, TO_64_FLOAT )
ATTRIB( R64_FLOAT, 1, float, double, TO_64_FLOAT )
ATTRIB( R32G32B32A32_FLOAT, 4, float, float, TO_32_FLOAT )
ATTRIB( R32G32B32_FLOAT, 3, float, float, TO_32_FLOAT )
ATTRIB( R32G32_FLOAT, 2, float, float, TO_32_FLOAT )
ATTRIB( R32_FLOAT, 1, float, float, TO_32_FLOAT )
ATTRIB( R16G16B16A16_FLOAT, 4, float, ushort, TO_16_FLOAT )
ATTRIB( R16G16B16_FLOAT, 3, float, ushort, TO_16_FLOAT )
ATTRIB( R16G16_FLOAT, 2, float, ushort, TO_16_FLOAT )
ATTRIB( R16_FLOAT, 1, float, ushort, TO_16_FLOAT )
ATTRIB( R32G32B32A32_USCALED, 4, float, unsigned, TO_32_USCALED )
ATTRIB( R32G32B32_USCALED, 3, float, unsigned, TO_32_USCALED )
ATTRIB( R32G32_USCALED, 2, float, unsigned, TO_32_USCALED )
ATTRIB( R32_USCALED, 1, float, unsigned, TO_32_USCALED )
ATTRIB( R32G32B32A32_SSCALED, 4, float, int, TO_32_SSCALED )
ATTRIB( R32G32B32_SSCALED, 3, float, int, TO_32_SSCALED )
ATTRIB( R32G32_SSCALED, 2, float, int, TO_32_SSCALED )
ATTRIB( R32_SSCALED, 1, float, int, TO_32_SSCALED )
ATTRIB( R32G32B32A32_UNORM, 4, float, unsigned, TO_32_UNORM )
ATTRIB( R32G32B32_UNORM, 3, float, unsigned, TO_32_UNORM )
ATTRIB( R32G32_UNORM, 2, float, unsigned, TO_32_UNORM )
ATTRIB( R32_UNORM, 1, float, unsigned, TO_32_UNORM )
ATTRIB( R32G32B32A32_SNORM, 4, float, int, TO_32_SNORM )
ATTRIB( R32G32B32_SNORM, 3, float, int, TO_32_SNORM )
ATTRIB( R32G32_SNORM, 2, float, int, TO_32_SNORM )
ATTRIB( R32_SNORM, 1, float, int, TO_32_SNORM )
ATTRIB( R16G16B16A16_USCALED, 4, float, ushort, TO_16_USCALED )
ATTRIB( R16G16B16_USCALED, 3, float, ushort, TO_16_USCALED )
ATTRIB( R16G16_USCALED, 2, float, ushort, TO_16_USCALED )
ATTRIB( R16_USCALED, 1, float, ushort, TO_16_USCALED )
ATTRIB( R16G16B16A16_SSCALED, 4, float, short, TO_16_SSCALED )
ATTRIB( R16G16B16_SSCALED, 3, float, short, TO_16_SSCALED )
ATTRIB( R16G16_SSCALED, 2, float, short, TO_16_SSCALED )
ATTRIB( R16_SSCALED, 1, float, short, TO_16_SSCALED )
ATTRIB( R16G16B16A16_UNORM, 4, float, ushort, TO_16_UNORM )
ATTRIB( R16G16B16_UNORM, 3, float, ushort, TO_16_UNORM )
ATTRIB( R16G16_UNORM, 2, float, ushort, TO_16_UNORM )
ATTRIB( R16_UNORM, 1, float, ushort, TO_16_UNORM )
ATTRIB( R16G16B16A16_SNORM, 4, float, short, TO_16_SNORM )
ATTRIB( R16G16B16_SNORM, 3, float, short, TO_16_SNORM )
ATTRIB( R16G16_SNORM, 2, float, short, TO_16_SNORM )
ATTRIB( R16_SNORM, 1, float, short, TO_16_SNORM )
ATTRIB( R8G8B8A8_USCALED, 4, float, ubyte, TO_8_USCALED )
ATTRIB( R8G8B8_USCALED, 3, float, ubyte, TO_8_USCALED )
ATTRIB( R8G8_USCALED, 2, float, ubyte, TO_8_USCALED )
ATTRIB( R8_USCALED, 1, float, ubyte, TO_8_USCALED )
ATTRIB( R8G8B8A8_SSCALED, 4, float, char, TO_8_SSCALED )
ATTRIB( R8G8B8_SSCALED, 3, float, char, TO_8_SSCALED )
ATTRIB( R8G8_SSCALED, 2, float, char, TO_8_SSCALED )
ATTRIB( R8_SSCALED, 1, float, char, TO_8_SSCALED )
ATTRIB( R8G8B8A8_UNORM, 4, float, ubyte, TO_8_UNORM )
ATTRIB( R8G8B8_UNORM, 3, float, ubyte, TO_8_UNORM )
ATTRIB( R8G8_UNORM, 2, float, ubyte, TO_8_UNORM )
ATTRIB( R8_UNORM, 1, float, ubyte, TO_8_UNORM )
ATTRIB( R8G8B8A8_SNORM, 4, float, char, TO_8_SNORM )
ATTRIB( R8G8B8_SNORM, 3, float, char, TO_8_SNORM )
ATTRIB( R8G8_SNORM, 2, float, char, TO_8_SNORM )
ATTRIB( R8_SNORM, 1, float, char, TO_8_SNORM )
ATTRIB( R32G32B32A32_UINT, 4, uint32_t, unsigned, TO_INT )
ATTRIB( R32G32B32_UINT, 3, uint32_t, unsigned, TO_INT )
ATTRIB( R32G32_UINT, 2, uint32_t, unsigned, TO_INT )
ATTRIB( R32_UINT, 1, uint32_t, unsigned, TO_INT )
ATTRIB( R16G16B16A16_UINT, 4, uint32_t, ushort, TO_INT )
ATTRIB( R16G16B16_UINT, 3, uint32_t, ushort, TO_INT )
ATTRIB( R16G16_UINT, 2, uint32_t, ushort, TO_INT )
ATTRIB( R16_UINT, 1, uint32_t, ushort, TO_INT )
ATTRIB( R8G8B8A8_UINT, 4, uint32_t, ubyte, TO_INT )
ATTRIB( R8G8B8_UINT, 3, uint32_t, ubyte, TO_INT )
ATTRIB( R8G8_UINT, 2, uint32_t, ubyte, TO_INT )
ATTRIB( R8_UINT, 1, uint32_t, ubyte, TO_INT )
ATTRIB( R32G32B32A32_SINT, 4, int32_t, int, TO_INT )
ATTRIB( R32G32B32_SINT, 3, int32_t, int, TO_INT )
ATTRIB( R32G32_SINT, 2, int32_t, int, TO_INT )
ATTRIB( R32_SINT, 1, int32_t, int, TO_INT )
ATTRIB( R16G16B16A16_SINT, 4, int32_t, short, TO_INT )
ATTRIB( R16G16B16_SINT, 3, int32_t, short, TO_INT )
ATTRIB( R16G16_SINT, 2, int32_t, short, TO_INT )
ATTRIB( R16_SINT, 1, int32_t, short, TO_INT )
ATTRIB( R8G8B8A8_SINT, 4, int32_t, char, TO_INT )
ATTRIB( R8G8B8_SINT, 3, int32_t, char, TO_INT )
ATTRIB( R8G8_SINT, 2, int32_t, char, TO_INT )
ATTRIB( R8_SINT, 1, int32_t, char, TO_INT )
static void
emit_A8R8G8B8_UNORM( const void *attrib, void *ptr)
{
float *in = (float *)attrib;
ubyte *out = (ubyte *)ptr;
out[0] = TO_8_UNORM(in[3]);
out[1] = TO_8_UNORM(in[0]);
out[2] = TO_8_UNORM(in[1]);
out[3] = TO_8_UNORM(in[2]);
}
static void
emit_B8G8R8A8_UNORM( const void *attrib, void *ptr)
{
float *in = (float *)attrib;
ubyte *out = (ubyte *)ptr;
out[2] = TO_8_UNORM(in[0]);
out[1] = TO_8_UNORM(in[1]);
out[0] = TO_8_UNORM(in[2]);
out[3] = TO_8_UNORM(in[3]);
}
static void
emit_B10G10R10A2_UNORM( const void *attrib, void *ptr )
{
const float *src = (const float *)attrib;
uint32_t value = 0;
value |= ((uint32_t)(CLAMP(src[2], 0, 1) * 0x3ff)) & 0x3ff;
value |= (((uint32_t)(CLAMP(src[1], 0, 1) * 0x3ff)) & 0x3ff) << 10;
value |= (((uint32_t)(CLAMP(src[0], 0, 1) * 0x3ff)) & 0x3ff) << 20;
value |= ((uint32_t)(CLAMP(src[3], 0, 1) * 0x3)) << 30;
#ifdef PIPE_ARCH_BIG_ENDIAN
value = util_bswap32(value);
#endif
*(uint32_t *)ptr = value;
}
static void
emit_B10G10R10A2_USCALED( const void *attrib, void *ptr )
{
const float *src = (const float *)attrib;
uint32_t value = 0;
value |= ((uint32_t)CLAMP(src[2], 0, 1023)) & 0x3ff;
value |= (((uint32_t)CLAMP(src[1], 0, 1023)) & 0x3ff) << 10;
value |= (((uint32_t)CLAMP(src[0], 0, 1023)) & 0x3ff) << 20;
value |= ((uint32_t)CLAMP(src[3], 0, 3)) << 30;
#ifdef PIPE_ARCH_BIG_ENDIAN
value = util_bswap32(value);
#endif
*(uint32_t *)ptr = value;
}
static void
emit_B10G10R10A2_SNORM( const void *attrib, void *ptr )
{
const float *src = (const float *)attrib;
uint32_t value = 0;
value |= (uint32_t)(((uint32_t)(CLAMP(src[2], -1, 1) * 0x1ff)) & 0x3ff) ;
value |= (uint32_t)((((uint32_t)(CLAMP(src[1], -1, 1) * 0x1ff)) & 0x3ff) << 10) ;
value |= (uint32_t)((((uint32_t)(CLAMP(src[0], -1, 1) * 0x1ff)) & 0x3ff) << 20) ;
value |= (uint32_t)(((uint32_t)(CLAMP(src[3], -1, 1) * 0x1)) << 30) ;
#ifdef PIPE_ARCH_BIG_ENDIAN
value = util_bswap32(value);
#endif
*(uint32_t *)ptr = value;
}
static void
emit_B10G10R10A2_SSCALED( const void *attrib, void *ptr )
{
const float *src = (const float *)attrib;
uint32_t value = 0;
value |= (uint32_t)(((uint32_t)CLAMP(src[2], -512, 511)) & 0x3ff) ;
value |= (uint32_t)((((uint32_t)CLAMP(src[1], -512, 511)) & 0x3ff) << 10) ;
value |= (uint32_t)((((uint32_t)CLAMP(src[0], -512, 511)) & 0x3ff) << 20) ;
value |= (uint32_t)(((uint32_t)CLAMP(src[3], -2, 1)) << 30) ;
#ifdef PIPE_ARCH_BIG_ENDIAN
value = util_bswap32(value);
#endif
*(uint32_t *)ptr = value;
}
static void
emit_R10G10B10A2_UNORM( const void *attrib, void *ptr )
{
const float *src = (const float *)attrib;
uint32_t value = 0;
value |= ((uint32_t)(CLAMP(src[0], 0, 1) * 0x3ff)) & 0x3ff;
value |= (((uint32_t)(CLAMP(src[1], 0, 1) * 0x3ff)) & 0x3ff) << 10;
value |= (((uint32_t)(CLAMP(src[2], 0, 1) * 0x3ff)) & 0x3ff) << 20;
value |= ((uint32_t)(CLAMP(src[3], 0, 1) * 0x3)) << 30;
#ifdef PIPE_ARCH_BIG_ENDIAN
value = util_bswap32(value);
#endif
*(uint32_t *)ptr = value;
}
static void
emit_R10G10B10A2_USCALED( const void *attrib, void *ptr )
{
const float *src = (const float *)attrib;
uint32_t value = 0;
value |= ((uint32_t)CLAMP(src[0], 0, 1023)) & 0x3ff;
value |= (((uint32_t)CLAMP(src[1], 0, 1023)) & 0x3ff) << 10;
value |= (((uint32_t)CLAMP(src[2], 0, 1023)) & 0x3ff) << 20;
value |= ((uint32_t)CLAMP(src[3], 0, 3)) << 30;
#ifdef PIPE_ARCH_BIG_ENDIAN
value = util_bswap32(value);
#endif
*(uint32_t *)ptr = value;
}
static void
emit_R10G10B10A2_SNORM( const void *attrib, void *ptr )
{
const float *src = (const float *)attrib;
uint32_t value = 0;
value |= (uint32_t)(((uint32_t)(CLAMP(src[0], -1, 1) * 0x1ff)) & 0x3ff) ;
value |= (uint32_t)((((uint32_t)(CLAMP(src[1], -1, 1) * 0x1ff)) & 0x3ff) << 10) ;
value |= (uint32_t)((((uint32_t)(CLAMP(src[2], -1, 1) * 0x1ff)) & 0x3ff) << 20) ;
value |= (uint32_t)(((uint32_t)(CLAMP(src[3], -1, 1) * 0x1)) << 30) ;
#ifdef PIPE_ARCH_BIG_ENDIAN
value = util_bswap32(value);
#endif
*(uint32_t *)ptr = value;
}
static void
emit_R10G10B10A2_SSCALED( const void *attrib, void *ptr)
{
const float *src = (const float *)attrib;
uint32_t value = 0;
value |= (uint32_t)(((uint32_t)CLAMP(src[0], -512, 511)) & 0x3ff) ;
value |= (uint32_t)((((uint32_t)CLAMP(src[1], -512, 511)) & 0x3ff) << 10) ;
value |= (uint32_t)((((uint32_t)CLAMP(src[2], -512, 511)) & 0x3ff) << 20) ;
value |= (uint32_t)(((uint32_t)CLAMP(src[3], -2, 1)) << 30) ;
#ifdef PIPE_ARCH_BIG_ENDIAN
value = util_bswap32(value);
#endif
*(uint32_t *)ptr = value;
}
static void
emit_NULL( const void *attrib, void *ptr )
{
/* do nothing is the only sensible option */
}
static emit_func get_emit_func( enum pipe_format format )
{
switch (format) {
case PIPE_FORMAT_R64_FLOAT:
return &emit_R64_FLOAT;
case PIPE_FORMAT_R64G64_FLOAT:
return &emit_R64G64_FLOAT;
case PIPE_FORMAT_R64G64B64_FLOAT:
return &emit_R64G64B64_FLOAT;
case PIPE_FORMAT_R64G64B64A64_FLOAT:
return &emit_R64G64B64A64_FLOAT;
case PIPE_FORMAT_R32_FLOAT:
return &emit_R32_FLOAT;
case PIPE_FORMAT_R32G32_FLOAT:
return &emit_R32G32_FLOAT;
case PIPE_FORMAT_R32G32B32_FLOAT:
return &emit_R32G32B32_FLOAT;
case PIPE_FORMAT_R32G32B32A32_FLOAT:
return &emit_R32G32B32A32_FLOAT;
case PIPE_FORMAT_R16_FLOAT:
return &emit_R16_FLOAT;
case PIPE_FORMAT_R16G16_FLOAT:
return &emit_R16G16_FLOAT;
case PIPE_FORMAT_R16G16B16_FLOAT:
return &emit_R16G16B16_FLOAT;
case PIPE_FORMAT_R16G16B16A16_FLOAT:
return &emit_R16G16B16A16_FLOAT;
case PIPE_FORMAT_R32_UNORM:
return &emit_R32_UNORM;
case PIPE_FORMAT_R32G32_UNORM:
return &emit_R32G32_UNORM;
case PIPE_FORMAT_R32G32B32_UNORM:
return &emit_R32G32B32_UNORM;
case PIPE_FORMAT_R32G32B32A32_UNORM:
return &emit_R32G32B32A32_UNORM;
case PIPE_FORMAT_R32_USCALED:
return &emit_R32_USCALED;
case PIPE_FORMAT_R32G32_USCALED:
return &emit_R32G32_USCALED;
case PIPE_FORMAT_R32G32B32_USCALED:
return &emit_R32G32B32_USCALED;
case PIPE_FORMAT_R32G32B32A32_USCALED:
return &emit_R32G32B32A32_USCALED;
case PIPE_FORMAT_R32_SNORM:
return &emit_R32_SNORM;
case PIPE_FORMAT_R32G32_SNORM:
return &emit_R32G32_SNORM;
case PIPE_FORMAT_R32G32B32_SNORM:
return &emit_R32G32B32_SNORM;
case PIPE_FORMAT_R32G32B32A32_SNORM:
return &emit_R32G32B32A32_SNORM;
case PIPE_FORMAT_R32_SSCALED:
return &emit_R32_SSCALED;
case PIPE_FORMAT_R32G32_SSCALED:
return &emit_R32G32_SSCALED;
case PIPE_FORMAT_R32G32B32_SSCALED:
return &emit_R32G32B32_SSCALED;
case PIPE_FORMAT_R32G32B32A32_SSCALED:
return &emit_R32G32B32A32_SSCALED;
case PIPE_FORMAT_R16_UNORM:
return &emit_R16_UNORM;
case PIPE_FORMAT_R16G16_UNORM:
return &emit_R16G16_UNORM;
case PIPE_FORMAT_R16G16B16_UNORM:
return &emit_R16G16B16_UNORM;
case PIPE_FORMAT_R16G16B16A16_UNORM:
return &emit_R16G16B16A16_UNORM;
case PIPE_FORMAT_R16_USCALED:
return &emit_R16_USCALED;
case PIPE_FORMAT_R16G16_USCALED:
return &emit_R16G16_USCALED;
case PIPE_FORMAT_R16G16B16_USCALED:
return &emit_R16G16B16_USCALED;
case PIPE_FORMAT_R16G16B16A16_USCALED:
return &emit_R16G16B16A16_USCALED;
case PIPE_FORMAT_R16_SNORM:
return &emit_R16_SNORM;
case PIPE_FORMAT_R16G16_SNORM:
return &emit_R16G16_SNORM;
case PIPE_FORMAT_R16G16B16_SNORM:
return &emit_R16G16B16_SNORM;
case PIPE_FORMAT_R16G16B16A16_SNORM:
return &emit_R16G16B16A16_SNORM;
case PIPE_FORMAT_R16_SSCALED:
return &emit_R16_SSCALED;
case PIPE_FORMAT_R16G16_SSCALED:
return &emit_R16G16_SSCALED;
case PIPE_FORMAT_R16G16B16_SSCALED:
return &emit_R16G16B16_SSCALED;
case PIPE_FORMAT_R16G16B16A16_SSCALED:
return &emit_R16G16B16A16_SSCALED;
case PIPE_FORMAT_R8_UNORM:
return &emit_R8_UNORM;
case PIPE_FORMAT_R8G8_UNORM:
return &emit_R8G8_UNORM;
case PIPE_FORMAT_R8G8B8_UNORM:
return &emit_R8G8B8_UNORM;
case PIPE_FORMAT_R8G8B8A8_UNORM:
return &emit_R8G8B8A8_UNORM;
case PIPE_FORMAT_R8_USCALED:
return &emit_R8_USCALED;
case PIPE_FORMAT_R8G8_USCALED:
return &emit_R8G8_USCALED;
case PIPE_FORMAT_R8G8B8_USCALED:
return &emit_R8G8B8_USCALED;
case PIPE_FORMAT_R8G8B8A8_USCALED:
return &emit_R8G8B8A8_USCALED;
case PIPE_FORMAT_R8_SNORM:
return &emit_R8_SNORM;
case PIPE_FORMAT_R8G8_SNORM:
return &emit_R8G8_SNORM;
case PIPE_FORMAT_R8G8B8_SNORM:
return &emit_R8G8B8_SNORM;
case PIPE_FORMAT_R8G8B8A8_SNORM:
return &emit_R8G8B8A8_SNORM;
case PIPE_FORMAT_R8_SSCALED:
return &emit_R8_SSCALED;
case PIPE_FORMAT_R8G8_SSCALED:
return &emit_R8G8_SSCALED;
case PIPE_FORMAT_R8G8B8_SSCALED:
return &emit_R8G8B8_SSCALED;
case PIPE_FORMAT_R8G8B8A8_SSCALED:
return &emit_R8G8B8A8_SSCALED;
case PIPE_FORMAT_B8G8R8A8_UNORM:
return &emit_B8G8R8A8_UNORM;
case PIPE_FORMAT_A8R8G8B8_UNORM:
return &emit_A8R8G8B8_UNORM;
case PIPE_FORMAT_R32_UINT:
return &emit_R32_UINT;
case PIPE_FORMAT_R32G32_UINT:
return &emit_R32G32_UINT;
case PIPE_FORMAT_R32G32B32_UINT:
return &emit_R32G32B32_UINT;
case PIPE_FORMAT_R32G32B32A32_UINT:
return &emit_R32G32B32A32_UINT;
case PIPE_FORMAT_R16_UINT:
return &emit_R16_UINT;
case PIPE_FORMAT_R16G16_UINT:
return &emit_R16G16_UINT;
case PIPE_FORMAT_R16G16B16_UINT:
return &emit_R16G16B16_UINT;
case PIPE_FORMAT_R16G16B16A16_UINT:
return &emit_R16G16B16A16_UINT;
case PIPE_FORMAT_R8_UINT:
return &emit_R8_UINT;
case PIPE_FORMAT_R8G8_UINT:
return &emit_R8G8_UINT;
case PIPE_FORMAT_R8G8B8_UINT:
return &emit_R8G8B8_UINT;
case PIPE_FORMAT_R8G8B8A8_UINT:
return &emit_R8G8B8A8_UINT;
case PIPE_FORMAT_R32_SINT:
return &emit_R32_SINT;
case PIPE_FORMAT_R32G32_SINT:
return &emit_R32G32_SINT;
case PIPE_FORMAT_R32G32B32_SINT:
return &emit_R32G32B32_SINT;
case PIPE_FORMAT_R32G32B32A32_SINT:
return &emit_R32G32B32A32_SINT;
case PIPE_FORMAT_R16_SINT:
return &emit_R16_SINT;
case PIPE_FORMAT_R16G16_SINT:
return &emit_R16G16_SINT;
case PIPE_FORMAT_R16G16B16_SINT:
return &emit_R16G16B16_SINT;
case PIPE_FORMAT_R16G16B16A16_SINT:
return &emit_R16G16B16A16_SINT;
case PIPE_FORMAT_R8_SINT:
return &emit_R8_SINT;
case PIPE_FORMAT_R8G8_SINT:
return &emit_R8G8_SINT;
case PIPE_FORMAT_R8G8B8_SINT:
return &emit_R8G8B8_SINT;
case PIPE_FORMAT_R8G8B8A8_SINT:
return &emit_R8G8B8A8_SINT;
case PIPE_FORMAT_B10G10R10A2_UNORM:
return &emit_B10G10R10A2_UNORM;
case PIPE_FORMAT_B10G10R10A2_USCALED:
return &emit_B10G10R10A2_USCALED;
case PIPE_FORMAT_B10G10R10A2_SNORM:
return &emit_B10G10R10A2_SNORM;
case PIPE_FORMAT_B10G10R10A2_SSCALED:
return &emit_B10G10R10A2_SSCALED;
case PIPE_FORMAT_R10G10B10A2_UNORM:
return &emit_R10G10B10A2_UNORM;
case PIPE_FORMAT_R10G10B10A2_USCALED:
return &emit_R10G10B10A2_USCALED;
case PIPE_FORMAT_R10G10B10A2_SNORM:
return &emit_R10G10B10A2_SNORM;
case PIPE_FORMAT_R10G10B10A2_SSCALED:
return &emit_R10G10B10A2_SSCALED;
default:
assert(0);
return &emit_NULL;
}
}
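/**
 * Translate a single vertex.
 *
 * For each attribute, compute the source index (applying the instance
 * divisor for instanced attributes, clamping to max_index otherwise),
 * then either memcpy the element when the raw-copy fast path applies
 * or fetch it into a scratch vector and emit it in the output format.
 */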
static ALWAYS_INLINE void PIPE_CDECL generic_run_one( struct translate_generic *tg,
unsigned elt,
unsigned start_instance,
unsigned instance_id,
void *vert )
{
unsigned nr_attrs = tg->nr_attrib;
unsigned attr;
for (attr = 0; attr < nr_attrs; attr++) {
float data[4];
uint8_t *dst = (uint8_t *)vert + tg->attrib[attr].output_offset;
if (tg->attrib[attr].type == TRANSLATE_ELEMENT_NORMAL) {
const uint8_t *src;
unsigned index;
int copy_size;
if (tg->attrib[attr].instance_divisor) {
index = start_instance;
index += (instance_id - start_instance) /
tg->attrib[attr].instance_divisor;
/* XXX we need to clamp the index here too, but to a
* per-array max value, not the draw->pt.max_index value
* that's being given to us via translate->set_buffer().
*/
}
else {
index = elt;
/* clamp to avoid going out of bounds */
index = MIN2(index, tg->attrib[attr].max_index);
}
src = tg->attrib[attr].input_ptr +
tg->attrib[attr].input_stride * index;
         copy_size = tg->attrib[attr].copy_size;
         /* Fast path: input and output formats are byte-for-byte
          * identical, so the raw bytes can be copied through unconverted.
          */
         if (likely(copy_size >= 0))
            memcpy(dst, src, copy_size);
         else
         {
tg->attrib[attr].fetch( data, src, 0, 0 );
if (0)
debug_printf("Fetch linear attr %d from %p stride %d index %d: "
" %f, %f, %f, %f \n",
attr,
tg->attrib[attr].input_ptr,
tg->attrib[attr].input_stride,
index,
data[0], data[1],data[2], data[3]);
tg->attrib[attr].emit( data, dst );
}
      } else {
         /* TRANSLATE_ELEMENT_INSTANCE_ID: the output is the instance id. */
         if (likely(tg->attrib[attr].copy_size >= 0))
            /* Raw 32-bit integer output: write straight into the output
             * vertex.  Copying into the scratch 'data' array here would
             * be a no-op, since nothing emits it afterwards.
             */
            memcpy(dst, &instance_id, 4);
         else
         {
            data[0] = (float)instance_id;
            tg->attrib[attr].emit( data, dst );
         }
      }
}
}
/**
* Fetch vertex attributes for 'count' vertices.
*/
static void PIPE_CDECL generic_run_elts( struct translate *translate,
const unsigned *elts,
unsigned count,
unsigned start_instance,
unsigned instance_id,
void *output_buffer )
{
struct translate_generic *tg = translate_generic(translate);
char *vert = output_buffer;
unsigned i;
for (i = 0; i < count; i++) {
generic_run_one(tg, *elts++, start_instance, instance_id, vert);
vert += tg->translate.key.output_stride;
}
}
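/* As generic_run_elts, but fetching through 16-bit element indices. */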
static void PIPE_CDECL generic_run_elts16( struct translate *translate,
const uint16_t *elts,
unsigned count,
unsigned start_instance,
unsigned instance_id,
void *output_buffer )
{
struct translate_generic *tg = translate_generic(translate);
char *vert = output_buffer;
unsigned i;
for (i = 0; i < count; i++) {
generic_run_one(tg, *elts++, start_instance, instance_id, vert);
vert += tg->translate.key.output_stride;
}
}
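/* As generic_run_elts, but fetching through 8-bit element indices. */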
static void PIPE_CDECL generic_run_elts8( struct translate *translate,
const uint8_t *elts,
unsigned count,
unsigned start_instance,
unsigned instance_id,
void *output_buffer )
{
struct translate_generic *tg = translate_generic(translate);
char *vert = output_buffer;
unsigned i;
for (i = 0; i < count; i++) {
generic_run_one(tg, *elts++, start_instance, instance_id, vert);
vert += tg->translate.key.output_stride;
}
}
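/**
 * Fetch vertex attributes for 'count' consecutive vertices starting at
 * 'start' (linear, non-indexed variant).
 */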
static void PIPE_CDECL generic_run( struct translate *translate,
unsigned start,
unsigned count,
unsigned start_instance,
unsigned instance_id,
void *output_buffer )
{
struct translate_generic *tg = translate_generic(translate);
char *vert = output_buffer;
unsigned i;
for (i = 0; i < count; i++) {
generic_run_one(tg, start + i, start_instance, instance_id, vert);
vert += tg->translate.key.output_stride;
}
}
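/**
 * Bind a vertex buffer: record its base pointer, stride and maximum
 * legal index for every attribute that sources from buffer 'buf'.
 */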
static void generic_set_buffer( struct translate *translate,
unsigned buf,
const void *ptr,
unsigned stride,
unsigned max_index )
{
struct translate_generic *tg = translate_generic(translate);
unsigned i;
for (i = 0; i < tg->nr_attrib; i++) {
if (tg->attrib[i].buffer == buf) {
tg->attrib[i].input_ptr = ((const uint8_t *)ptr +
tg->attrib[i].input_offset);
tg->attrib[i].input_stride = stride;
tg->attrib[i].max_index = max_index;
}
}
}
static void generic_release( struct translate *translate )
{
/* Refcount?
*/
FREE(translate);
}
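/**
 * Check that a pure-integer conversion is lossless: each channel must
 * keep its signedness and must not shrink.
 */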
static boolean
is_legal_int_format_combo( const struct util_format_description *src,
const struct util_format_description *dst )
{
unsigned i;
unsigned nr = MIN2(src->nr_channels, dst->nr_channels);
for (i = 0; i < nr; i++) {
/* The signs must match. */
if (src->channel[i].type != dst->channel[i].type) {
return FALSE;
}
/* Integers must not lose precision at any point in the pipeline. */
if (src->channel[i].size > dst->channel[i].size) {
return FALSE;
}
}
return TRUE;
}
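/**
 * Create a translate object for the given key.
 *
 * For each vertex element this sets up either a raw memcpy fast path
 * (when input and output formats are byte-for-byte identical) or a
 * fetch followed by an emit into the output format.  Pure-integer
 * formats are rejected when the conversion would change the sign or
 * lose bits.
 */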
struct translate *translate_generic_create( const struct translate_key *key )
{
struct translate_generic *tg = CALLOC_STRUCT(translate_generic);
unsigned i;
if (tg == NULL)
return NULL;
tg->translate.key = *key;
tg->translate.release = generic_release;
tg->translate.set_buffer = generic_set_buffer;
tg->translate.run_elts = generic_run_elts;
tg->translate.run_elts16 = generic_run_elts16;
tg->translate.run_elts8 = generic_run_elts8;
tg->translate.run = generic_run;
for (i = 0; i < key->nr_elements; i++) {
const struct util_format_description *format_desc =
util_format_description(key->element[i].input_format);
assert(format_desc);
tg->attrib[i].type = key->element[i].type;
if (format_desc->channel[0].pure_integer) {
const struct util_format_description *out_format_desc =
util_format_description(key->element[i].output_format);
if (!is_legal_int_format_combo(format_desc, out_format_desc)) {
FREE(tg);
return NULL;
}
if (format_desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED) {
assert(format_desc->fetch_rgba_sint);
tg->attrib[i].fetch = (fetch_func)format_desc->fetch_rgba_sint;
} else {
assert(format_desc->fetch_rgba_uint);
tg->attrib[i].fetch = (fetch_func)format_desc->fetch_rgba_uint;
}
} else {
assert(format_desc->fetch_rgba_float);
tg->attrib[i].fetch = (fetch_func)format_desc->fetch_rgba_float;
}
tg->attrib[i].buffer = key->element[i].input_buffer;
tg->attrib[i].input_offset = key->element[i].input_offset;
tg->attrib[i].instance_divisor = key->element[i].instance_divisor;
tg->attrib[i].output_offset = key->element[i].output_offset;
      /* copy_size == -1 means "no raw-copy fast path"; use fetch/emit. */
      tg->attrib[i].copy_size = -1;
      if (tg->attrib[i].type == TRANSLATE_ELEMENT_INSTANCE_ID)
      {
         /* The instance id can be copied as a raw 32-bit integer when
          * the output is a 32-bit integer format.
          */
         if (key->element[i].output_format == PIPE_FORMAT_R32_USCALED
             || key->element[i].output_format == PIPE_FORMAT_R32_SSCALED)
            tg->attrib[i].copy_size = 4;
      }
      else
      {
         /* Identical input/output formats occupying a whole number of
          * bytes per element can be copied straight through.
          */
         if (key->element[i].input_format == key->element[i].output_format
             && format_desc->block.width == 1
             && format_desc->block.height == 1
             && !(format_desc->block.bits & 7))
            tg->attrib[i].copy_size = format_desc->block.bits >> 3;
      }
      if (tg->attrib[i].copy_size < 0)
         tg->attrib[i].emit = get_emit_func(key->element[i].output_format);
      else
         tg->attrib[i].emit = NULL;
}
tg->nr_attrib = key->nr_elements;
return &tg->translate;
}
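/* A minimal usage sketch (illustrative only: the buffer pointer, stride,
 * max index, vertex count and formats below are assumptions, not values
 * taken from this file):
 *
 *    struct translate_key key;
 *    struct translate *t;
 *
 *    memset(&key, 0, sizeof(key));
 *    key.output_stride = 4 * sizeof(float);
 *    key.nr_elements = 1;
 *    key.element[0].type = TRANSLATE_ELEMENT_NORMAL;
 *    key.element[0].input_format = PIPE_FORMAT_R8G8B8A8_UNORM;
 *    key.element[0].output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
 *    key.element[0].input_buffer = 0;
 *    key.element[0].input_offset = 0;
 *    key.element[0].output_offset = 0;
 *
 *    t = translate_generic_create(&key);
 *    t->set_buffer(t, 0, vb_ptr, vb_stride, vb_max_index);
 *    t->run(t, 0, vertex_count, 0, 0, out_ptr);
 *    t->release(t);
 */
/**
 * Report whether the generic translate module can emit the given
 * vertex format.
 */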
boolean translate_generic_is_output_format_supported(enum pipe_format format)
{
switch(format)
{
case PIPE_FORMAT_R64G64B64A64_FLOAT: return TRUE;
case PIPE_FORMAT_R64G64B64_FLOAT: return TRUE;
case PIPE_FORMAT_R64G64_FLOAT: return TRUE;
case PIPE_FORMAT_R64_FLOAT: return TRUE;
case PIPE_FORMAT_R32G32B32A32_FLOAT: return TRUE;
case PIPE_FORMAT_R32G32B32_FLOAT: return TRUE;
case PIPE_FORMAT_R32G32_FLOAT: return TRUE;
case PIPE_FORMAT_R32_FLOAT: return TRUE;
case PIPE_FORMAT_R16G16B16A16_FLOAT: return TRUE;
case PIPE_FORMAT_R16G16B16_FLOAT: return TRUE;
case PIPE_FORMAT_R16G16_FLOAT: return TRUE;
case PIPE_FORMAT_R16_FLOAT: return TRUE;
case PIPE_FORMAT_R32G32B32A32_USCALED: return TRUE;
case PIPE_FORMAT_R32G32B32_USCALED: return TRUE;
case PIPE_FORMAT_R32G32_USCALED: return TRUE;
case PIPE_FORMAT_R32_USCALED: return TRUE;
case PIPE_FORMAT_R32G32B32A32_SSCALED: return TRUE;
case PIPE_FORMAT_R32G32B32_SSCALED: return TRUE;
case PIPE_FORMAT_R32G32_SSCALED: return TRUE;
case PIPE_FORMAT_R32_SSCALED: return TRUE;
case PIPE_FORMAT_R32G32B32A32_UNORM: return TRUE;
case PIPE_FORMAT_R32G32B32_UNORM: return TRUE;
case PIPE_FORMAT_R32G32_UNORM: return TRUE;
case PIPE_FORMAT_R32_UNORM: return TRUE;
case PIPE_FORMAT_R32G32B32A32_SNORM: return TRUE;
case PIPE_FORMAT_R32G32B32_SNORM: return TRUE;
case PIPE_FORMAT_R32G32_SNORM: return TRUE;
case PIPE_FORMAT_R32_SNORM: return TRUE;
case PIPE_FORMAT_R16G16B16A16_USCALED: return TRUE;
case PIPE_FORMAT_R16G16B16_USCALED: return TRUE;
case PIPE_FORMAT_R16G16_USCALED: return TRUE;
case PIPE_FORMAT_R16_USCALED: return TRUE;
case PIPE_FORMAT_R16G16B16A16_SSCALED: return TRUE;
case PIPE_FORMAT_R16G16B16_SSCALED: return TRUE;
case PIPE_FORMAT_R16G16_SSCALED: return TRUE;
case PIPE_FORMAT_R16_SSCALED: return TRUE;
case PIPE_FORMAT_R16G16B16A16_UNORM: return TRUE;
case PIPE_FORMAT_R16G16B16_UNORM: return TRUE;
case PIPE_FORMAT_R16G16_UNORM: return TRUE;
case PIPE_FORMAT_R16_UNORM: return TRUE;
case PIPE_FORMAT_R16G16B16A16_SNORM: return TRUE;
case PIPE_FORMAT_R16G16B16_SNORM: return TRUE;
case PIPE_FORMAT_R16G16_SNORM: return TRUE;
case PIPE_FORMAT_R16_SNORM: return TRUE;
case PIPE_FORMAT_R8G8B8A8_USCALED: return TRUE;
case PIPE_FORMAT_R8G8B8_USCALED: return TRUE;
case PIPE_FORMAT_R8G8_USCALED: return TRUE;
case PIPE_FORMAT_R8_USCALED: return TRUE;
case PIPE_FORMAT_R8G8B8A8_SSCALED: return TRUE;
case PIPE_FORMAT_R8G8B8_SSCALED: return TRUE;
case PIPE_FORMAT_R8G8_SSCALED: return TRUE;
case PIPE_FORMAT_R8_SSCALED: return TRUE;
case PIPE_FORMAT_R8G8B8A8_UNORM: return TRUE;
case PIPE_FORMAT_R8G8B8_UNORM: return TRUE;
case PIPE_FORMAT_R8G8_UNORM: return TRUE;
case PIPE_FORMAT_R8_UNORM: return TRUE;
case PIPE_FORMAT_R8G8B8A8_SNORM: return TRUE;
case PIPE_FORMAT_R8G8B8_SNORM: return TRUE;
case PIPE_FORMAT_R8G8_SNORM: return TRUE;
case PIPE_FORMAT_R8_SNORM: return TRUE;
case PIPE_FORMAT_A8R8G8B8_UNORM: return TRUE;
case PIPE_FORMAT_B8G8R8A8_UNORM: return TRUE;
case PIPE_FORMAT_R32G32B32A32_UINT: return TRUE;
case PIPE_FORMAT_R32G32B32_UINT: return TRUE;
case PIPE_FORMAT_R32G32_UINT: return TRUE;
case PIPE_FORMAT_R32_UINT: return TRUE;
case PIPE_FORMAT_R16G16B16A16_UINT: return TRUE;
case PIPE_FORMAT_R16G16B16_UINT: return TRUE;
case PIPE_FORMAT_R16G16_UINT: return TRUE;
case PIPE_FORMAT_R16_UINT: return TRUE;
case PIPE_FORMAT_R8G8B8A8_UINT: return TRUE;
case PIPE_FORMAT_R8G8B8_UINT: return TRUE;
case PIPE_FORMAT_R8G8_UINT: return TRUE;
case PIPE_FORMAT_R8_UINT: return TRUE;
case PIPE_FORMAT_R32G32B32A32_SINT: return TRUE;
case PIPE_FORMAT_R32G32B32_SINT: return TRUE;
case PIPE_FORMAT_R32G32_SINT: return TRUE;
case PIPE_FORMAT_R32_SINT: return TRUE;
case PIPE_FORMAT_R16G16B16A16_SINT: return TRUE;
case PIPE_FORMAT_R16G16B16_SINT: return TRUE;
case PIPE_FORMAT_R16G16_SINT: return TRUE;
case PIPE_FORMAT_R16_SINT: return TRUE;
case PIPE_FORMAT_R8G8B8A8_SINT: return TRUE;
case PIPE_FORMAT_R8G8B8_SINT: return TRUE;
case PIPE_FORMAT_R8G8_SINT: return TRUE;
case PIPE_FORMAT_R8_SINT: return TRUE;
case PIPE_FORMAT_B10G10R10A2_UNORM: return TRUE;
case PIPE_FORMAT_B10G10R10A2_USCALED: return TRUE;
case PIPE_FORMAT_B10G10R10A2_SNORM: return TRUE;
case PIPE_FORMAT_B10G10R10A2_SSCALED: return TRUE;
case PIPE_FORMAT_R10G10B10A2_UNORM: return TRUE;
case PIPE_FORMAT_R10G10B10A2_USCALED: return TRUE;
case PIPE_FORMAT_R10G10B10A2_SNORM: return TRUE;
case PIPE_FORMAT_R10G10B10A2_SSCALED: return TRUE;
default: return FALSE;
}
}

File diff suppressed because it is too large

View File

@@ -212,10 +212,6 @@
 #define PIPE_OS_UNIX
 #endif
-#if defined(_WIN32) || defined(WIN32)
-#define PIPE_OS_WINDOWS
-#endif
 #if defined(__HAIKU__)
 #define PIPE_OS_HAIKU
 #define PIPE_OS_UNIX