Update capn_write_ptr to handle recursive structures when copying

This commit is contained in:
James McKaskill 2013-05-05 19:30:33 -04:00
parent 14ac488681
commit edfcaf9d34
5 changed files with 704 additions and 269 deletions

2
.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
/capn.o
/capn.so

12
Makefile Normal file
View file

@ -0,0 +1,12 @@
.PHONY: all clean
all: capn.so
clean:
rm -f *.o *.so
%.o: %.c *.h *.inc
$(CC) -Wall -Werror -g -O2 -c $< -o $@
capn.so: capn.o
$(CC) -shared -Wall -Werror -fPIC -g -O2 $^ -o $@

View file

@ -14,7 +14,7 @@ int CAT(capn_read,SZ) (const struct CAT(capn_list,SZ) *list, int off, UINT_T *to
switch (p->type) {
case CAPN_LIST:
if (p->datasz == SZ/8 && !p->ptrsz && (SZ == 8 || LITTLE_ENDIAN)) {
if (p->datasz == SZ/8 && !p->ptrsz && (SZ == 8 || CAPN_LITTLE)) {
memcpy(to, p->data + off, sz * (SZ/8));
return sz;
} else if (p->datasz < SZ/8) {
@ -53,7 +53,7 @@ int CAT(capn_write,SZ) (struct CAT(capn_list,SZ) *list, int off, const UINT_T *f
switch (p->type) {
case CAPN_LIST:
if (p->datasz == SZ/8 && !p->ptrsz && (SZ == 8 || LITTLE_ENDIAN)) {
if (p->datasz == SZ/8 && !p->ptrsz && (SZ == 8 || CAPN_LITTLE)) {
memcpy(p->data + off, from, sz * (SZ/8));
return sz;
} else if (p->datasz < SZ/8) {

697
capn.c
View file

@ -3,6 +3,7 @@
#include <stdlib.h>
#include <string.h>
#include <sys/param.h>
#define STRUCT_PTR 0
#define LIST_PTR 1
@ -28,18 +29,196 @@
static int min(int a, int b) { return (a < b) ? a : b; }
#endif
static struct capn_segment *lookup_segment(struct capn_segment *s, uint32_t id) {
if (s->id == id)
return s;
if (!s->capn || !s->capn->lookup)
#ifdef BYTE_ORDER
#define CAPN_LITTLE (BYTE_ORDER == LITTLE_ENDIAN)
#elif defined(__BYTE_ORDER)
#define CAPN_LITTLE (__BYTE_ORDER == __LITTLE_ENDIAN)
#else
#define CAPN_LITTLE 0
#endif
/* from to
* G G
* / \ / \
* P U N U
* / \ / \
* 1 N P 3
* / \ / \
* 2 3 1 2
*/
static void rotate_left(struct capn_tree **pp, struct capn_tree **pn) {
	struct capn_tree *p = *pp, *n = *pn;
	struct capn_tree *g = p->parent;
	/* subtree 2 moves from n to p - its parent link must follow */
	p->right = n->left;
	if (p->right) {
		p->right->parent = p;
	}
	n->left = p;
	/* relink whichever child slot of g pointed at p; g may be NULL
	 * when p was the tree root */
	if (g) {
		if (g->left == p) {
			g->left = n;
		} else {
			g->right = n;
		}
	}
	n->parent = g;
	p->parent = n;
	*pn = p;
	*pp = n;
}
/* from to
* G G
* / \ / \
* U P U N
* / \ / \
* N 3 1 P
* / \ / \
* 1 2 2 3
*/
static void rotate_right(struct capn_tree **pp, struct capn_tree **pn) {
	struct capn_tree *p = *pp, *n = *pn;
	struct capn_tree *g = p->parent;
	/* subtree 2 moves from n to p - its parent link must follow */
	p->left = n->right;
	if (p->left) {
		p->left->parent = p;
	}
	n->right = p;
	/* relink whichever child slot of g pointed at p; g may be NULL
	 * when p was the tree root */
	if (g) {
		if (g->left == p) {
			g->left = n;
		} else {
			g->right = n;
		}
	}
	n->parent = g;
	p->parent = n;
	*pn = p;
	*pp = n;
}
static void insert_rebalance(struct capn_tree **root, struct capn_tree *n) {
	/* Standard red-black insert fixup. The caller has already linked n
	 * into the tree (n->parent and the parent's child slot are set). */
	n->red = 1;
	n->left = n->right = NULL;
	for (;;) {
		struct capn_tree *p, *u, *g;
		p = n->parent;
		if (!p) {
			/* n is the tree root; the root is always black */
			*root = n;
			n->red = 0;
			return;
		}
		/* a black parent means no red-red violation was introduced */
		if (!p->red) {
			return;
		}
		/* p is red so it cannot be the root; g exists and is black */
		g = p->parent;
		if (p == g->left) {
			if ((u = g->right) != NULL && u->red) {
				/* red uncle: recolor and continue from g */
				p->red = 0;
				u->red = 0;
				g->red = 1;
				n = g;
				continue;
			}
			if (n == p->right) {
				/* inner child: rotate it to the outside first */
				rotate_left(&p, &n);
			}
			p->red = 0;
			g->red = 1;
			rotate_right(&g, &p);
		} else {
			if ((u = g->left) != NULL && u->red) {
				p->red = 0;
				u->red = 0;
				g->red = 1;
				n = g;
				continue;
			}
			if (n == p->left) {
				rotate_right(&p, &n);
			}
			p->red = 0;
			g->red = 1;
			rotate_left(&g, &p);
		}
		/* after the rotation g names the (black) subtree top, so the
		 * fixup is done; update *root if the rotation reached the top */
		if (!g->parent) {
			*root = g;
		}
		return;
	}
}
static void *new_data(struct capn_segment **ps, int sz) {
	/* Returns sz bytes of room in some segment of the session owning
	 * *ps, updating *ps to the segment used, or NULL on failure. */
	struct capn *c = (*ps)->capn;
	struct capn_segment *s;

	/* find an existing segment with sufficient room */
	for (s = c->seglist; s != NULL; s = s->next) {
		if (s->len + sz <= s->cap) {
			goto end;
		}
	}

	/* otherwise ask the user to create one */
	s = c->create ? c->create(c->user, c->segnum, sz) : NULL;
	if (!s) {
		return NULL;
	}

	s->capn = c;
	s->id = c->segnum++;
	s->next = c->seglist;
	c->seglist = s;
	/* new segments always have the largest id so they attach as the
	 * right child of the previous max-id segment (NULL for the first) */
	s->hdr.parent = c->lastseg;
	if (c->lastseg) {
		c->lastseg->right = &s->hdr;
	}
	c->lastseg = &s->hdr;
	insert_rebalance(&c->segtree, &s->hdr);

end:
	*ps = s;
	s->len += sz;
	return s->data + s->len - sz;
}
void capn_append_segment(struct capn *c, struct capn_segment *s) {
	/* Adds the user supplied segment s to session c, assigning it the
	 * next free id and inserting it into the segment tree. */
	s->id = c->segnum++;
	s->capn = c;
	s->next = c->seglist;
	c->seglist = s;
	/* s has the largest id so it attaches as the right child of the
	 * previous max-id segment; c->lastseg is NULL for the first segment
	 * of a zero-initialized capn (see capn.h) */
	s->hdr.parent = c->lastseg;
	if (c->lastseg) {
		c->lastseg->right = &s->hdr;
	}
	c->lastseg = &s->hdr;
	insert_rebalance(&c->segtree, &s->hdr);
}
static struct capn_segment *lookup_segment(struct capn* c, struct capn_segment *s, uint32_t id) {
	/* Finds the segment with the given id, using s as a hint, looking it
	 * up via the user callback and caching it in the tree if unseen.
	 * Returns NULL if the segment can not be found. */
	struct capn_tree **x;
	struct capn_segment *y;

	/* fast path: the hint segment is the one we want */
	if (s && s->id == id)
		return s;
	if (!c)
		return NULL;

	/* walk the id-ordered segment tree, remembering the attach point in
	 * case the segment has to be inserted below. The cast relies on hdr
	 * being the first member of capn_segment. */
	x = &c->segtree;
	y = NULL;
	while (*x) {
		y = (struct capn_segment*) *x;
		if (id == y->id) {
			return y;
		} else if (id < y->id) {
			x = &y->hdr.left;
		} else {
			x = &y->hdr.right;
		}
	}

	/* not cached: ask the user to look it up */
	s = c->lookup ? c->lookup(c->user, id) : NULL;
	if (!s)
		return NULL;

	if (id >= c->segnum) {
		c->lastseg = &s->hdr;
		c->segnum = id+1;
	}

	s->id = id;
	s->capn = c;
	s->next = c->seglist;
	c->seglist = s;
	/* y is NULL when this is the first segment in the tree */
	s->hdr.parent = y ? &y->hdr : NULL;
	*x = &s->hdr;
	insert_rebalance(&c->segtree, &s->hdr);
	return s;
}
static uint64_t lookup_far(struct capn_segment **s, char **d, uint64_t val) {
uint32_t off = U32(val >> 3);
if ((*s = lookup_segment(*s, U32(val >> 32))) == NULL) {
if ((*s = lookup_segment((*s)->capn, *s, U32(val >> 32))) == NULL) {
return 0;
}
@ -60,7 +239,7 @@ static uint64_t lookup_far(struct capn_segment **s, char **d, uint64_t val) {
return 0;
}
if ((*s = lookup_segment(*s, U32(far >> 32))) == NULL) {
if ((*s = lookup_segment((*s)->capn, *s, U32(far >> 32))) == NULL) {
return 0;
}
@ -94,61 +273,27 @@ static char *struct_ptr(struct capn_segment *s, char *d) {
return NULL;
}
struct capn_ptr capn_read_ptr(const struct capn_ptr *p, int off) {
char *d, *e;
static struct capn_ptr read_ptr(struct capn_segment *s, char *d) {
char *e;
struct capn_ptr ret;
uint64_t val;
switch (p->type) {
case CAPN_LIST:
/* Return an inner pointer */
if (off >= p->size) {
goto err;
}
ret.type = CAPN_STRUCT;
ret.data = p->data + off * (p->datasz + p->ptrsz);
ret.seg = p->seg;
ret.datasz = p->datasz;
ret.ptrsz = p->ptrsz;
return ret;
case CAPN_STRUCT:
off *= 8;
if (off >= p->ptrsz) {
goto err;
}
d = p->data + p->datasz + off;
break;
case CAPN_PTR_LIST:
if (off >= p->size) {
goto err;
}
d = p->data + off * 8;
break;
default:
goto err;
}
val = capn_flip64(*(uint64_t*) d);
ret.seg = p->seg;
if ((val&3) == FAR_PTR) {
val = lookup_far(&ret.seg, &d, val);
val = lookup_far(&s, &d, val);
}
d += (I32(U32(val)) << 1) + 8;
ret.data = d;
if ((val&3) > LIST_PTR || d < ret.seg->data) {
if ((val&3) > LIST_PTR || d < s->data) {
goto err;
}
if ((val&3) == STRUCT_PTR) {
ret.type = CAPN_STRUCT;
ret.size = 0;
ret.datasz = U32(U16(val >> 32)) * 8;
ret.ptrsz = U32(U16(val >> 48)) * 8;
e = d + ret.size * (ret.datasz + ret.ptrsz);
@ -185,11 +330,10 @@ struct capn_ptr capn_read_ptr(const struct capn_ptr *p, int off) {
break;
case PTR_LIST:
ret.type = CAPN_PTR_LIST;
ret.ptrsz = 8;
e = d + ret.size * 8;
break;
case COMPOSITE_LIST:
if (d+8-ret.seg->data > ret.seg->len) {
if (d+8-s->data > s->len) {
goto err;
}
@ -209,8 +353,49 @@ struct capn_ptr capn_read_ptr(const struct capn_ptr *p, int off) {
}
}
if (e - ret.seg->data <= ret.seg->len) {
if (e - s->data > s->len)
goto err;
ret.seg = s;
return ret;
err:
memset(&ret, 0, sizeof(ret));
return ret;
}
struct capn_ptr capn_read_ptr(const struct capn_ptr *p, int off) {
struct capn_ptr ret;
switch (p->type) {
case CAPN_LIST:
/* Return an inner pointer */
if (off >= p->size) {
goto err;
}
ret = *p;
ret.type = CAPN_LIST_MEMBER;
ret.data += off * (p->datasz + p->ptrsz);
ret.size = 0;
return ret;
case CAPN_STRUCT:
case CAPN_LIST_MEMBER:
off *= 8;
if (off >= p->ptrsz) {
goto err;
}
return read_ptr(p->seg, p->data + p->datasz + off);
case CAPN_PTR_LIST:
if (off >= p->size) {
goto err;
}
return read_ptr(p->seg, p->data + off * 8);
default:
goto err;
}
err:
@ -271,11 +456,12 @@ static void write_ptr_tag(char *d, const struct capn_ptr *p, int off) {
}
static int has_tag(const struct capn_ptr* p) {
	/* Returns non-zero if the 8 bytes immediately before p->data hold a
	 * ptr tag describing p (written by new_object when an object had to
	 * be moved to a different segment). */
	char *d = p->data - 8;
	return d >= p->seg->data && ptr_value(p, 0) == *(uint64_t*) d;
}
#define NEED_TO_COPY 1
static int write_ptr(struct capn_segment *s, char *d, const struct capn_ptr *p) {
/* note p->seg can be NULL if its a ptr to static data */
@ -283,16 +469,19 @@ static int write_ptr(struct capn_segment *s, char *d, const struct capn_ptr *p)
*(uint64_t*) d = 0;
return 0;
} else if (p->seg && p->seg == s) {
} else if (!p->seg || p->seg->capn != s->capn || p->type == CAPN_LIST_MEMBER) {
return NEED_TO_COPY;
} else if (p->seg == s) {
write_ptr_tag(d, p, p->data - d - 8);
return 0;
} else if (p->seg && p->seg->capn == s->capn && ((p->data - p->seg->data) & 7) == 0) {
} else {
/* if its in the same context we can create a far pointer */
if (has_tag(p)) {
/* By lucky chance, the data has a tag in front
* of it. This happens when new_data had to move
* of it. This happens when new_object had to move
* the data to a new segment. */
write_far_ptr(d, p->seg, p->data);
return 0;
@ -307,144 +496,310 @@ static int write_ptr(struct capn_segment *s, char *d, const struct capn_ptr *p)
} else {
/* have to allocate room for a double far
* pointer, but try to allocate it in our
* starting segment first */
* pointer */
char *t;
if (s->len + 16 > s->cap) {
if (!s->capn->create)
return -1;
if ((s = s->capn->create(s->capn->user, 16)) == NULL)
return -1;
if (s->len + 16 <= s->cap) {
/* Try and allocate in the src segment
* first. This should improve lookup on
* read. */
t = s->data + s->len;
s->len += 16;
} else {
t = new_data(&s, 16);
if (!t) return -1;
}
t = s->data + s->len;
write_far_ptr(t, p->seg, p->data);
write_ptr_tag(t+8, p, 0);
write_double_far(d, s, t);
s->len += 16;
return 0;
}
}
}
} else {
/* different context or not aligned - have to copy */
struct capn_ptr copy;
struct copy {
struct capn_tree hdr;
struct capn_ptr to, from;
int fsize;
};
static int data_size(const struct capn_ptr *p) {
	/* Number of bytes of data owned by the object p points to; 0 for
	 * null/unknown pointers. */
	switch (p->type) {
	case CAPN_BIT_LIST:
		return p->datasz;
	case CAPN_PTR_LIST:
		return p->size*8;
	case CAPN_STRUCT:
	case CAPN_LIST_MEMBER:
		return p->datasz + p->ptrsz;
	case CAPN_LIST:
		return p->size * (p->datasz + p->ptrsz);
	default:
		return 0;
	}
}
static struct capn_ptr new_clone(struct capn_segment *s, const struct capn_ptr *p) {
	/* Allocate a fresh, empty object in segment s with the same shape as
	 * *p. Pointer types with nothing to allocate are returned as-is. */
	switch (p->type) {
	case CAPN_STRUCT:
	case CAPN_LIST_MEMBER:
		/* list members clone as standalone structs */
		return capn_new_struct(s, p->datasz, p->ptrsz);
	case CAPN_LIST:
		return capn_new_list(s, p->size, p->datasz, p->ptrsz);
	case CAPN_BIT_LIST:
		return capn_new_bit_list(s, p->size);
	case CAPN_PTR_LIST:
		return capn_new_ptr_list(s, p->size);
	default:
		return *p;
	}
}
static int is_ptr_equal(const struct capn_ptr *a, const struct capn_ptr *b) {
	/* two ptrs are equal when every addressing/shape field matches */
	if (a->data != b->data || a->type != b->type)
		return 0;
	return a->size == b->size && a->datasz == b->datasz && a->ptrsz == b->ptrsz;
}
static int write_copy(struct capn_segment *seg, char *data, struct capn_ptr *t, struct capn_ptr *f, int *dep) {
	/* Copies the object *f into the context owning seg, writing the new
	 * pointer at data and leaving the (normalized) copy state in *t/*f
	 * for the main copy loop. Returns non-zero on error. */
	struct capn *c = seg->capn;
	struct capn_tree **x;
	struct copy *y;
	int sz = data_size(f);
	/* List members are always copied as their data is owned by the
	 * enclosing list, so they never get their own entry in the copy
	 * tree. Zero sized objects are also left out as they can never
	 * overlap nor be usefully shared between pointers. */
	int track = sz > 0 && f->type != CAPN_LIST_MEMBER;

	/* Look for an existing copy of this data so that multiply-referenced
	 * and recursive structures share a single copy. y remembers the
	 * attach point in case a new node has to be inserted below. */
	x = &c->copy;
	y = NULL;
	while (track && *x) {
		y = (struct copy*) *x;
		if (f->data + sz <= y->from.data) {
			x = &y->hdr.left;
		} else if (y->from.data + y->fsize <= f->data) {
			x = &y->hdr.right;
		} else if (is_ptr_equal(f, &y->from)) {
			/* already copied - just point at the existing copy */
			return write_ptr(seg, data, &y->to);
		} else {
			/* pointer partially overlapping already copied data */
			return -1;
		}
	}

	/* no existing copy - create one
	 * NOTE(review): for CAPN_LIST_MEMBER this allocates a fresh struct
	 * rather than copying in place - confirm against the main copy loop */
	*t = new_clone(seg, f);

	if (track) {
		struct copy *n;
		struct capn_segment *cs = c->copylist;

		/* grab room for the copy tree node, creating a new local
		 * segment when the current one is full */
		if (!cs || cs->len + sizeof(*n) > cs->cap) {
			cs = c->create ? c->create(c->user, CAPN_SEGID_LOCAL, sizeof(*n)) : NULL;
			if (!cs) {
				/* can't allocate a copy structure */
				return -1;
			}
			cs->next = c->copylist;
			c->copylist = cs;
		}

		n = (struct copy*) (cs->data + cs->len);
		cs->len += sizeof(*n);

		n->from = *f;
		n->to = *t;
		n->fsize = sz;
		/* y is NULL when the copy tree is empty */
		n->hdr.parent = y ? &y->hdr : NULL;
		*x = &n->hdr;
		insert_rebalance(&c->copy, &n->hdr);
	}

	/* Minimize the number of types the main copy routine has to deal
	 * with to just CAPN_LIST and CAPN_PTR_LIST. A ptr list only needs
	 * t->type, t->size, t->data, t->seg, f->data, f->seg to be valid. */
	switch (t->type) {
	case CAPN_STRUCT:
	case CAPN_LIST_MEMBER:
		if (t->datasz) {
			memcpy(t->data, f->data, t->datasz);
			t->data += t->datasz;
			f->data += t->datasz;
		}
		if (t->ptrsz) {
			/* expose the pointer section as a ptr list to walk */
			t->type = CAPN_PTR_LIST;
			t->size = t->ptrsz/8;
			(*dep)++;
		}
		return 0;
	case CAPN_BIT_LIST:
		memcpy(t->data, f->data, t->datasz);
		return 0;
	case CAPN_LIST:
		if (!t->size) {
			/* empty list - nothing to copy */
		} else if (t->ptrsz && t->datasz) {
			/* composite list - copy member by member */
			(*dep)++;
		} else if (t->datasz) {
			memcpy(t->data, f->data, t->size * t->datasz);
		} else if (t->ptrsz) {
			t->type = CAPN_PTR_LIST;
			t->size *= t->ptrsz/8;
			(*dep)++;
		}
		return 0;
	case CAPN_PTR_LIST:
		if (t->size) {
			(*dep)++;
		}
		return 0;
	default:
		return -1;
	}
}
#define MAX_COPY_DEPTH 32
int capn_write_ptr(struct capn_ptr *p, int off, const struct capn_ptr *tgt) {
struct capn_ptr inner;
struct capn_ptr to[MAX_COPY_DEPTH], from[MAX_COPY_DEPTH];
char *data;
int err, dep;
switch (p->type) {
case CAPN_LIST:
if (off >= p->size)
if (off < p->size && (tgt->type == CAPN_STRUCT || tgt->type == CAPN_LIST_MEMBER)) {
struct capn_ptr *f, *t;
char *d;
int sz;
/* copy struct data */
d = p->data + off * (p->datasz + p->ptrsz);
sz = min(p->datasz, tgt->datasz);
memcpy(d, tgt->data, sz);
memset(d + sz, 0, p->datasz - sz);
/* reset excess pointers */
d += p->datasz;
sz = min(p->ptrsz, tgt->ptrsz);
memset(d + sz, 0, p->ptrsz - sz);
/* create a pointer list for the main loop to copy */
dep = 1;
/* main copy loop doesn't need the other fields
* for ptr lists */
f = &from[0];
f->data = tgt->data + tgt->datasz;
f->seg = tgt->seg;
t = &to[0];
t->type = CAPN_PTR_LIST;
t->data = d;
t->size = sz/8;
t->seg = p->seg;
goto copy_loop;
} else {
return -1;
inner = capn_read_ptr(p, off);
return capn_copy(&inner, tgt);
}
case CAPN_PTR_LIST:
if (off >= p->size)
return -1;
return write_ptr(p->seg, p->data + off * 8, tgt);
data = p->data + off * 8;
break;
case CAPN_STRUCT:
case CAPN_LIST_MEMBER:
off *= 8;
if (off >= p->ptrsz)
return -1;
return write_ptr(p->seg, p->data + p->datasz + off, tgt);
data = p->data + p->datasz + off;
break;
default:
return -1;
}
}
static int copy_ptrs(struct capn_ptr *t, const struct capn_ptr *f, int reset_excess) {
int tptrs = t->ptrsz / 8;
int fptrs = f->ptrsz / 8;
int i;
err = write_ptr(p->seg, data, tgt);
if (err != NEED_TO_COPY)
return err;
for (i = 0; i < min(tptrs, fptrs); i++) {
struct capn_ptr p = capn_read_ptr(f, i);
if (capn_write_ptr(t, i, &p))
/* Depth first copy the source whilst using a pointer stack to
* maintain the ptr to set and size left to copy at each level.
* We also maintain a rbtree (capn->copy) of the copies indexed
* by the source data. This way we can detect overlapped
* pointers in the source (and bail) and recursive structures
* (and point to the previous copy).
*/
dep = 0;
from[0] = *tgt;
if (write_copy(p->seg, data, to, from, &dep))
return -1;
copy_loop:
while (dep) {
struct capn_ptr *tc = &to[dep-1], *tn = &to[dep];
struct capn_ptr *fc = &from[dep-1], *fn = &from[dep];
if (dep+1 == MAX_COPY_DEPTH) {
return -1;
}
if (reset_excess) {
for (i = min(tptrs, fptrs); i < tptrs; i++) {
capn_write_ptr(t, i, NULL);
}
}
return 0;
}
int capn_copy(struct capn_ptr *t, const struct capn_ptr *f) {
int fsz = f->size * (f->datasz + f->ptrsz);
int tsz = t->size * (t->datasz + t->ptrsz);
int msz = min(fsz, tsz);
switch (t->type) {
case CAPN_STRUCT:
if (f->type == CAPN_STRUCT) {
/* For structs we reset the excess as reading
* from the end of a undersized struct just
* reads zeros */
memcpy(t->data, f->data, msz);
memset(t->data + msz, 0, tsz - msz);
return copy_ptrs(t, f, 1);
} else {
return -1;
if (!tc->size) {
dep--;
continue;
}
switch (tc->type) {
case CAPN_LIST:
if (f->type == CAPN_LIST && !f->ptrsz && !t->ptrsz && f->datasz == t->datasz) {
memcpy(t->data, f->data, msz);
return 0;
} else if (f->type == CAPN_LIST || f->type == CAPN_PTR_LIST) {
return copy_ptrs(t, f, 0);
} else {
*fn = *fc;
*tn = *tc;
fn->type = tn->type = CAPN_LIST_MEMBER;
fn->size = tn->size = 0;
if (write_copy(tc->seg, tc->data, tn, fn, &dep))
return -1;
}
fc->data += tc->datasz + tc->ptrsz;
tc->data += tc->datasz + tc->ptrsz;
tc->size--;
break;
case CAPN_PTR_LIST:
if (f->type == CAPN_LIST || f->type == CAPN_PTR_LIST) {
return copy_ptrs(t, f, 0);
} else {
return -1;
}
case CAPN_BIT_LIST:
if (f->type != CAPN_BIT_LIST) {
memcpy(t->data, f->data, min(t->datasz, f->datasz));
return 0;
} else {
return -1;
}
default:
*fn = read_ptr(fc->seg, fc->data);
if (write_copy(tc->seg, tc->data, tn, fn, &dep))
return -1;
fc->data += 8;
tc->data += 8;
tc->size--;
break;
}
}
return 0;
}
int capn_read1(const struct capn_list1 *list, int off, uint8_t *data, int sz) {
/* Note we only support aligned reads */
int bsz;
@ -484,56 +839,69 @@ int capn_write1(struct capn_list1 *list, int off, const uint8_t *data, int sz) {
}
#define SZ 8
#include "capn-list.c"
#include "capn-list.inc"
#undef SZ
#define SZ 16
#include "capn-list.c"
#include "capn-list.inc"
#undef SZ
#define SZ 32
#include "capn-list.c"
#include "capn-list.inc"
#undef SZ
#define SZ 64
#include "capn-list.c"
#include "capn-list.inc"
#undef SZ
static void new_object(struct capn_ptr *p, int bytes) {
	/* Allocates room for a new object of the given size, preferring the
	 * segment p->seg. On failure *p is zeroed (type CAPN_NULL). */
	struct capn_segment *s = p->seg;

	/* all allocations are 8 byte aligned */
	bytes = (bytes + 7) & ~7;

	if (s->len + bytes <= s->cap) {
		p->data = s->data + s->len;
		s->len += bytes;
		return;
	}

	/* switch to a different segment, adding a tag in front of the data
	 * so that write_ptr can later create a far pointer to it */
	p->data = new_data(&s, bytes + 8);
	if (!p->data) {
		memset(p, 0, sizeof(*p));
		return;
	}
	/* the object now lives in s, not the original p->seg */
	p->seg = s;
	write_ptr_tag(p->data, p, 0);
	p->data += 8;
}
struct capn_ptr capn_root(struct capn* c) {
	/* Returns a fake one-entry ptr list through which the session root
	 * object can be read/written at index 0 (see capn.h). Returns a
	 * zeroed ptr if segment 0 can not be found or allocation fails. */
	struct capn_ptr p;
	p.seg = lookup_segment(c, NULL, 0);
	if (!p.seg) goto err;
	p.data = p.seg->data;
	p.size = 1;
	p.type = CAPN_PTR_LIST;
	p.datasz = 0;
	p.ptrsz = 0;
	/* NOTE(review): new_object overwrites p.data with freshly allocated
	 * room, so the root ptr lives at the segment's current length - that
	 * is offset 0 only when segment 0 is empty. Confirm intent for
	 * sessions that already contain data. */
	new_object(&p, 8);
	return p;
err:
	memset(&p, 0, sizeof(p));
	return p;
}
struct capn_ptr capn_new_struct(struct capn_segment *seg, int datasz, int ptrs) {
	/* Allocates a new struct with room for datasz bytes of data (rounded
	 * up to a multiple of 8) and ptrs pointer slots, in seg or a related
	 * segment when seg is full. On allocation failure the returned ptr
	 * is zeroed (see new_object). */
	struct capn_ptr p;
	p.seg = seg;
	p.type = CAPN_STRUCT;
	p.size = 0;
	p.datasz = (datasz + 7) & ~7;
	p.ptrsz = ptrs * 8;
	new_object(&p, p.datasz + p.ptrsz);
	return p;
}
@ -554,7 +922,7 @@ struct capn_ptr capn_new_list(struct capn_segment *seg, int sz, int datasz, int
p.ptrsz = 0;
}
new_data(&p, p.size * (p.datasz+p.ptrsz));
new_object(&p, p.size * (p.datasz+p.ptrsz));
return p;
}
@ -563,8 +931,9 @@ struct capn_ptr capn_new_bit_list(struct capn_segment *seg, int sz) {
p.seg = seg;
p.type = CAPN_BIT_LIST;
p.datasz = (sz+7)/8;
p.ptrsz = 0;
p.size = sz;
new_data(&p, p.datasz);
new_object(&p, p.datasz);
return p;
}
@ -573,9 +942,9 @@ struct capn_ptr capn_new_ptr_list(struct capn_segment *seg, int sz) {
p.seg = seg;
p.type = CAPN_PTR_LIST;
p.size = sz;
p.ptrsz = 8;
p.ptrsz = 0;
p.datasz = 0;
new_data(&p, sz*8);
new_object(&p, sz*8);
return p;
}
@ -586,7 +955,7 @@ struct capn_ptr capn_new_string(struct capn_segment *seg, const char *str, int s
p.size = ((sz >= 0) ? sz : strlen(str)) + 1;
p.datasz = 1;
p.ptrsz = 0;
new_data(&p, p.size);
new_object(&p, p.size);
if (p.data) {
memcpy(p.data, str, p.size-1);
}
@ -658,3 +1027,7 @@ int capn_write_data(struct capn_ptr *p, int off, struct capn_data tgt) {
}
return capn_write_ptr(p, off, &m);
}
int capn_marshal_iptr(const union capn_iptr *ip, struct capn_ptr *p, int off) {
	/* Marshal the capn_ptr view of the iptr union into pointer slot off
	 * of *p; returns whatever capn_write_ptr reports. */
	const struct capn_ptr *tgt = &ip->c;
	return capn_write_ptr(p, off, tgt);
}

142
capn.h
View file

@ -5,27 +5,49 @@
#include <stdint.h>
typedef struct capn_segment *(*capn_create_t)(void* /*user*/, int /*sz*/);
typedef struct capn_segment *(*capn_lookup_t)(void* /*user*/, uint32_t /*id*/);
#define CAPN_SEGID_LOCAL 0xFFFFFFFF
/* struct capn is a common structure shared between segments in the same
* session/context so that far pointers between the segments will be created.
*
* lookup is used to look up segments by id when dereferencing a far pointer
*
* create is used to create or lookup an alternate segment that has at least
* sz available (ie returned seg->len + sz <= seg->cap)
*
* create and lookup can be NULL if you don't need multiple segments and don't
* want to support copying
*
* create is also used to allocate room for the copy tree with id ==
* CAPN_SEGID_LOCAL. This data should be allocated in the local memory space
*
* seglist and copylist are linked lists which can be used to free up segments
* on cleanup
*
* lookup, create, and user can be set by the user. Other values should be
* zero initialized.
*/
struct capn {
	/* user callbacks - see the comment block above */
	struct capn_segment *(*lookup)(void* /*user*/, uint32_t /*id */);
	struct capn_segment *(*create)(void* /*user*/, uint32_t /*id */, int /*sz*/);
	void *user;
	/* next segment id to hand out */
	uint32_t segnum;
	/* rbtree of struct copy nodes used whilst copying between contexts */
	struct capn_tree *copy;
	/* rbtree of segments ordered by id, and the max-id segment */
	struct capn_tree *segtree, *lastseg;
	/* linked lists of segments used for cleanup */
	struct capn_segment *seglist;
	struct capn_segment *copylist;
};
/* intrusive red-black tree node; red holds the node's color */
struct capn_tree {
	struct capn_tree *left, *right, *parent;
	unsigned int red : 1;
};
/* struct capn_segment contains the information about a single segment.
* capn should point to a struct capn that is shared between segments in the
* same session
* id specifies the segment id, used for far pointers
* data specifies the segment data. This should not move after creation.
* len specifies the current segment length. This should be 0 for a blank
* segment.
* cap specifies the segment capacity.
@ -33,8 +55,13 @@ struct capn {
* at which point a new segment will be requested via capn->create.
*
* data, len, and cap must all by 8 byte aligned
*
* data, len, cap, id should all set by the user. Other values should be zero
* initialized.
*/
struct capn_segment {
struct capn_tree hdr;
struct capn_segment *next;
struct capn *capn;
uint32_t id;
char *data;
@ -47,6 +74,7 @@ enum CAPN_TYPE {
CAPN_LIST = 2,
CAPN_PTR_LIST = 3,
CAPN_BIT_LIST = 4,
CAPN_LIST_MEMBER = 5,
};
struct capn_ptr {
@ -70,12 +98,32 @@ struct capn_data {
struct capn_segment *seg;
};
/* union view of a capn_ptr that can also be stored or passed around as
 * a raw pointer or integer; capn_marshal_iptr marshals the c member */
union capn_iptr {
	struct capn_ptr c;
	uintptr_t u;
	void *p;
};
/* vtable of operations on returned values; free presumably releases the
 * value's resources - NOTE(review): no users visible in this header */
struct capn_ret_vt {
	void (*free)(void*);
};
/* typed wrappers around capn_ptr for lists of 1, 8, 16, 32 and 64 bit
 * elements, used with the capn_readN/capn_writeN accessors below */
struct capn_list1{struct capn_ptr p;};
struct capn_list8{struct capn_ptr p;};
struct capn_list16{struct capn_ptr p;};
struct capn_list32{struct capn_ptr p;};
struct capn_list64{struct capn_ptr p;};
/* capn_append_segment appends a segment to a session */
void capn_append_segment(struct capn*, struct capn_segment*);
/* capn_root returns a fake pointer that can be used to read/write the session
* root object using capn_(read|write)_ptr at index 0. The root is the object
* pointed to by a ptr at offset 0 in segment 0. This will allocate room for
* the root if not already.
*/
struct capn_ptr capn_root(struct capn*);
/* capn_read|write_ptr functions read/write ptrs to list/structs
* off is the list index or pointer index in a struct
* capn_write_ptr will copy the data, create far pointers, etc if the target
@ -95,12 +143,6 @@ struct capn_data capn_read_data(const struct capn_ptr *p, int off);
int capn_write_text(struct capn_ptr *p, int off, struct capn_text tgt);
int capn_write_data(struct capn_ptr *p, int off, struct capn_data tgt);
/* capn_copy copies data from 'from' to 'to'
* returns 0 on success, non-zero on error (type mismatch, allocation error,
* etc).
*/
int capn_copy(struct capn_ptr *to, const struct capn_ptr *from);
/* capn_read_* functions read data from a list
* The length of the list is given by p->size
* off specifies how far into the list to start
@ -137,53 +179,59 @@ struct capn_ptr capn_new_bit_list(struct capn_segment *seg, int sz);
struct capn_ptr capn_new_ptr_list(struct capn_segment *seg, int sz);
struct capn_ptr capn_new_string(struct capn_segment *seg, const char *str, int sz);
#if defined(__cplusplus) || (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
#define CAPN_INLINE inline
#else
#define CAPN_INLINE static
#endif
/* capn_get|set_* functions get/set struct values
* off is the offset into the structure in bytes
* Rarely should these be called directly, instead use the generated code.
* Data must be xored with the default value
* These are static in order to be inlined.
* These are inlined
*/
static uint8_t capn_get8(const struct capn_ptr *p, int off);
static uint16_t capn_get16(const struct capn_ptr *p, int off);
static uint32_t capn_get32(const struct capn_ptr *p, int off);
static uint64_t capn_get64(const struct capn_ptr *p, int off);
static int capn_set8(struct capn_ptr *p, int off, uint8_t val);
static int capn_set16(struct capn_ptr *p, int off, uint16_t val);
static int capn_set32(struct capn_ptr *p, int off, uint32_t val);
static int capn_set64(struct capn_ptr *p, int off, uint64_t val);
CAPN_INLINE uint8_t capn_get8(const struct capn_ptr *p, int off);
CAPN_INLINE uint16_t capn_get16(const struct capn_ptr *p, int off);
CAPN_INLINE uint32_t capn_get32(const struct capn_ptr *p, int off);
CAPN_INLINE uint64_t capn_get64(const struct capn_ptr *p, int off);
CAPN_INLINE int capn_set8(struct capn_ptr *p, int off, uint8_t val);
CAPN_INLINE int capn_set16(struct capn_ptr *p, int off, uint16_t val);
CAPN_INLINE int capn_set32(struct capn_ptr *p, int off, uint32_t val);
CAPN_INLINE int capn_set64(struct capn_ptr *p, int off, uint64_t val);
int capn_marshal_iptr(const union capn_iptr*, struct capn_ptr*, int off);
/* Inline functions */
#define T(IDX) s.v[IDX] = (uint8_t) (v >> (8*IDX))
CAPN_INLINE uint8_t capn_flip8(uint8_t v) {
	/* single bytes have no endianness */
	return v;
}
CAPN_INLINE uint16_t capn_flip16(uint16_t v) {
	/* convert between native and little-endian order by (re)assembling
	 * the value byte by byte (T stores byte IDX of v - see above) */
	union { uint16_t u; uint8_t v[2]; } s;
	T(0); T(1);
	return s.u;
}
CAPN_INLINE uint32_t capn_flip32(uint32_t v) {
	/* convert between native and little-endian order byte by byte; the
	 * union must be viewed as bytes for T's per-byte stores */
	union { uint32_t u; uint8_t v[4]; } s;
	T(0); T(1); T(2); T(3);
	return s.u;
}
CAPN_INLINE uint64_t capn_flip64(uint64_t v) {
	/* convert between native and little-endian order byte by byte */
	union { uint64_t u; uint8_t v[8]; } s;
	T(0); T(1); T(2); T(3); T(4); T(5); T(6); T(7);
	return s.u;
}
#undef T
CAPN_INLINE uint8_t capn_get8(const struct capn_ptr *p, int off) {
	/* read the byte at offset off of the data section, or 0 when off is
	 * out of bounds (the read must use off, not just bounds-check it) */
	return off < p->datasz ? capn_flip8(*(uint8_t*) (p->data + off)) : 0;
}
static int capn_set8(struct capn_ptr *p, int off, uint8_t val) {
if (p->type == CAPN_STRUCT && off < p->datasz) {
CAPN_INLINE int capn_set8(struct capn_ptr *p, int off, uint8_t val) {
if (off < p->datasz) {
*(uint8_t*) p->data = capn_flip8(val);
return 0;
} else {
@ -191,11 +239,11 @@ static int capn_set8(struct capn_ptr *p, int off, uint8_t val) {
}
}
CAPN_INLINE uint16_t capn_get16(const struct capn_ptr *p, int off) {
	/* read the u16 at byte offset off, or 0 when out of bounds.
	 * NOTE(review): assumes off is 2-byte aligned so that off < datasz
	 * covers the whole word - confirm generated callers */
	return off < p->datasz ? capn_flip16(*(uint16_t*) (p->data + off)) : 0;
}
static int capn_set16(struct capn_ptr *p, int off, uint16_t val) {
if (p->type == CAPN_STRUCT && off < p->datasz) {
CAPN_INLINE int capn_set16(struct capn_ptr *p, int off, uint16_t val) {
if (off < p->datasz) {
*(uint16_t*) p->data = capn_flip16(val);
return 0;
} else {
@ -203,11 +251,11 @@ static int capn_set16(struct capn_ptr *p, int off, uint16_t val) {
}
}
CAPN_INLINE uint32_t capn_get32(const struct capn_ptr *p, int off) {
	/* read the u32 at byte offset off, or 0 when out of bounds.
	 * NOTE(review): assumes off is 4-byte aligned so that off < datasz
	 * covers the whole word - confirm generated callers */
	return off < p->datasz ? capn_flip32(*(uint32_t*) (p->data + off)) : 0;
}
static int capn_set32(struct capn_ptr *p, int off, uint32_t val) {
if (p->type == CAPN_STRUCT && off < p->datasz) {
CAPN_INLINE int capn_set32(struct capn_ptr *p, int off, uint32_t val) {
if (off < p->datasz) {
*(uint32_t*) p->data = capn_flip32(val);
return 0;
} else {
@ -215,11 +263,11 @@ static int capn_set32(struct capn_ptr *p, int off, uint32_t val) {
}
}
CAPN_INLINE uint64_t capn_get64(const struct capn_ptr *p, int off) {
	/* read the u64 at byte offset off, or 0 when out of bounds.
	 * NOTE(review): assumes off is 8-byte aligned so that off < datasz
	 * covers the whole word - confirm generated callers */
	return off < p->datasz ? capn_flip64(*(uint64_t*) (p->data + off)) : 0;
}
static int capn_set64(struct capn_ptr *p, int off, uint64_t val) {
if (p->type == CAPN_STRUCT && off < p->datasz) {
CAPN_INLINE int capn_set64(struct capn_ptr *p, int off, uint64_t val) {
if (off < p->datasz) {
*(uint64_t*) p->data = capn_flip64(val);
return 0;
} else {
@ -227,13 +275,13 @@ static int capn_set64(struct capn_ptr *p, int off, uint64_t val) {
}
}
CAPN_INLINE float capn_get_float(const struct capn_ptr *p, int off, float def) {
	/* stored value is xored with the default (see comment above) */
	union { float f; uint32_t u;} u;
	u.f = def;
	u.u ^= capn_get32(p, off);
	return u.f;
}
static int capn_set_float(struct capn_ptr *p, int off, float f, float def) {
CAPN_INLINE int capn_set_float(struct capn_ptr *p, int off, float f, float def) {
union { float f; uint32_t u;} u;
union { float f; uint32_t u;} d;
u.f = f;
@ -241,13 +289,13 @@ static int capn_set_float(struct capn_ptr *p, int off, float f, float def) {
return capn_set32(p, off, u.u ^ d.u);
}
CAPN_INLINE double capn_get_double(const struct capn_ptr *p, int off, double def) {
	/* stored value is xored with the default (see comment above) */
	union { double f; uint64_t u;} u;
	u.f = def;
	u.u ^= capn_get64(p, off);
	return u.f;
}
static int capn_set_double(struct capn_ptr *p, int off, double f, double def) {
CAPN_INLINE int capn_set_double(struct capn_ptr *p, int off, double f, double def) {
union { double f; uint64_t u;} u;
union { double f; uint64_t u;} d;
d.f = f;