Merge upstream ConcurrencyKit up to commit 005c715.
akopytov committed Dec 11, 2017
1 parent 6a8770c commit ba9b455
Showing 30 changed files with 892 additions and 159 deletions.
1 change: 1 addition & 0 deletions third_party/concurrency_kit/ck/build/ck.build.s390x
@@ -0,0 +1 @@
+CFLAGS+=-O2 -D__s390x__
27 changes: 19 additions & 8 deletions third_party/concurrency_kit/ck/configure
@@ -34,7 +34,7 @@ WANT_PIC=yes
 
 P_PWD=`pwd`
 MAINTAINER='[email protected]'
-VERSION=${VERSION:-'0.6.0'}
+VERSION=${VERSION:-'1.0.0'}
 VERSION_MAJOR='0'
 BUILD="$PWD/build/ck.build"
 PREFIX=${PREFIX:-"/usr/local"}
@@ -169,7 +169,8 @@ generate_stdout()
 for option; do
 	case "$option" in
 	*=?*)
-		value=`expr -- "$option" : '[^=]*=\(.*\)'`
+		optname=`echo $option|cut -c 3-`
+		value=`expr "$optname" : '[^=]*=\(.*\)'`
 		;;
 	*=)
 		value=
@@ -294,7 +295,8 @@ for option; do
 		fi
 		;;
 	*=*)
-		NAME=`expr -- "$option" : '\([^=]*\)='`
+		optname=`echo $option|cut -c 3-`
+		NAME=`expr "$optname" : '\([^=]*\)='`
 		eval "$NAME='$value'"
 		export $NAME
 		;;
@@ -347,7 +349,7 @@ case "$SYSTEM" in
 		DCORES=`sysctl -n hw.ncpu`
 		SYSTEM=darwin
 		;;
-	MINGW32*)
+	MINGW32*|MSYS_NT*)
 		SYSTEM=mingw32
 		LDFLAGS="-mthreads $LDFLAGS"
 		;;
@@ -482,6 +484,13 @@ case $PLATFORM in
 		PLATFORM=aarch64
 		ENVIRONMENT=64
 		;;
+	"s390x")
+		RTM_ENABLE="CK_MD_RTM_DISABLE"
+		LSE_ENABLE="CK_MD_LSE_DISABLE"
+		MM="${MM:-"CK_MD_RMO"}"
+		PLATFORM=s390x
+		ENVIRONMENT=64
+		;;
 	*)
 		RTM_ENABLE="CK_MD_RTM_DISABLE"
 		LSE_ENABLE="CK_MD_LSE_DISABLE"
@@ -561,9 +570,11 @@ else
 fi
 
 printf "Finding suitable compiler........"
-CC=`pathsearch "${CC:-cc}"`
-if test -z "$CC" -o ! -x "$CC"; then
-	CC=`pathsearch "${CC:-gcc}"`
+if test ! -x "${CC}"; then
+	CC=`pathsearch "${CC:-cc}"`
+	if test -z "$CC" -o ! -x "$CC"; then
+		CC=`pathsearch "${CC:-gcc}"`
+	fi
 fi
 assert "$CC" "not found"
 
Expand Down Expand Up @@ -596,7 +607,7 @@ int main(void) {
EOF

$CC -o .1 .1.c
COMPILER=`./.1`
COMPILER=`./.1 2> /dev/null`
r=$?
rm -f .1.c .1

Expand Down
8 changes: 6 additions & 2 deletions third_party/concurrency_kit/ck/doc/ck_epoch_register
@@ -34,7 +34,7 @@ Concurrency Kit (libck, \-lck)
 .Sh SYNOPSIS
 .In ck_epoch.h
 .Ft void
-.Fn ck_epoch_register "ck_epoch_t *epoch" "ck_epoch_record_t *record"
+.Fn ck_epoch_register "ck_epoch_t *epoch" "ck_epoch_record_t *record" "void *cl"
 .Sh DESCRIPTION
 The
 .Fn ck_epoch_register 3
@@ -49,7 +49,11 @@ object pointed to by the
 argument will have lifetime managed by the underlying epoch sub-system.
 The record object must not be destroyed after it is associated with a
 .Fn ck_epoch_register 3
-call.
+call. An optional context pointer
+.Fa cl
+may be passed that is retrievable with the
+.Fn ck_epoch_record_ct 3
+function.
 .Sh RETURN VALUES
 This function has no return value.
 .Sh SEE ALSO
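To make the new three-argument registration concrete, here is a minimal usage sketch. The ck_epoch_register() and ck_epoch_record_ct() signatures are the ones introduced by this merge; the epoch object, the worker type, and both helper functions are hypothetical names used only for illustration.

#include <ck_epoch.h>

struct worker {
	int id;			/* hypothetical per-thread state */
};

static ck_epoch_t epoch;

static void
worker_attach(ck_epoch_record_t *record, struct worker *w)
{

	/* "w" is the optional context pointer (the "cl" argument). */
	ck_epoch_register(&epoch, record, w);
}

static int
worker_id(const ck_epoch_record_t *record)
{
	struct worker *w = ck_epoch_record_ct(record);

	/* The context passed at registration time is recovered here. */
	return w->id;
}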
95 changes: 84 additions & 11 deletions third_party/concurrency_kit/ck/include/ck_epoch.h
@@ -83,6 +83,7 @@ struct ck_epoch_ref {
 };
 
 struct ck_epoch_record {
+	ck_stack_entry_t record_next;
 	struct ck_epoch *global;
 	unsigned int state;
 	unsigned int epoch;
@@ -92,25 +93,31 @@ struct ck_epoch_record {
 	} local CK_CC_CACHELINE;
 	unsigned int n_pending;
 	unsigned int n_peak;
-	unsigned long n_dispatch;
+	unsigned int n_dispatch;
+	void *ct;
 	ck_stack_t pending[CK_EPOCH_LENGTH];
-	ck_stack_entry_t record_next;
 } CK_CC_CACHELINE;
 typedef struct ck_epoch_record ck_epoch_record_t;
 
 struct ck_epoch {
 	unsigned int epoch;
-	char pad[CK_MD_CACHELINE - sizeof(unsigned int)];
-	ck_stack_t records;
 	unsigned int n_free;
+	ck_stack_t records;
 };
 typedef struct ck_epoch ck_epoch_t;
 
 /*
  * Internal functions.
  */
 void _ck_epoch_addref(ck_epoch_record_t *, ck_epoch_section_t *);
-void _ck_epoch_delref(ck_epoch_record_t *, ck_epoch_section_t *);
+bool _ck_epoch_delref(ck_epoch_record_t *, ck_epoch_section_t *);
+
+CK_CC_FORCE_INLINE static void *
+ck_epoch_record_ct(const ck_epoch_record_t *record)
+{
+
+	return ck_pr_load_ptr(&record->ct);
+}
 
 /*
  * Marks the beginning of an epoch-protected section.
@@ -160,25 +167,30 @@ ck_epoch_begin(ck_epoch_record_t *record, ck_epoch_section_t *section)
 }
 
 /*
- * Marks the end of an epoch-protected section.
+ * Marks the end of an epoch-protected section. Returns true if no more
+ * sections exist for the caller.
  */
-CK_CC_FORCE_INLINE static void
+CK_CC_FORCE_INLINE static bool
 ck_epoch_end(ck_epoch_record_t *record, ck_epoch_section_t *section)
 {
 
 	ck_pr_fence_release();
 	ck_pr_store_uint(&record->active, record->active - 1);
 
 	if (section != NULL)
-		_ck_epoch_delref(record, section);
+		return _ck_epoch_delref(record, section);
 
-	return;
+	return record->active == 0;
 }
 
 /*
  * Defers the execution of the function pointed to by the "cb"
  * argument until an epoch counter loop. This allows for a
  * non-blocking deferral.
+ *
+ * We can get away without a fence here due to the monotonic nature
+ * of the epoch counter. Worst case, this will result in some delays
+ * before object destruction.
  */
 CK_CC_FORCE_INLINE static void
 ck_epoch_call(ck_epoch_record_t *record,
@@ -195,13 +207,74 @@ ck_epoch_call(ck_epoch_record_t *record,
 	return;
 }
 
+/*
+ * Same as ck_epoch_call, but allows for records to be shared and is reentrant.
+ */
+CK_CC_FORCE_INLINE static void
+ck_epoch_call_strict(ck_epoch_record_t *record,
+    ck_epoch_entry_t *entry,
+    ck_epoch_cb_t *function)
+{
+	struct ck_epoch *epoch = record->global;
+	unsigned int e = ck_pr_load_uint(&epoch->epoch);
+	unsigned int offset = e & (CK_EPOCH_LENGTH - 1);
+
+	ck_pr_inc_uint(&record->n_pending);
+	entry->function = function;
+
+	/* Store fence is implied by push operation. */
+	ck_stack_push_upmc(&record->pending[offset], &entry->stack_entry);
+	return;
+}
+
+/*
+ * This callback is used for synchronize_wait to allow for custom blocking
+ * behavior.
+ */
+typedef void ck_epoch_wait_cb_t(ck_epoch_t *, ck_epoch_record_t *,
+    void *);
+
+/*
+ * Return latest epoch value. This operation provides load ordering.
+ */
+CK_CC_FORCE_INLINE static unsigned int
+ck_epoch_value(const ck_epoch_t *ep)
+{
+
+	ck_pr_fence_load();
+	return ck_pr_load_uint(&ep->epoch);
+}
+
 void ck_epoch_init(ck_epoch_t *);
-ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *);
-void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *);
+
+/*
+ * Attempts to recycle an unused epoch record. If one is successfully
+ * allocated, the record context pointer is also updated.
+ */
+ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *, void *);
+
+/*
+ * Registers an epoch record. An optional context pointer may be passed that
+ * is retrievable with ck_epoch_record_ct.
+ */
+void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *, void *);
+
+/*
+ * Marks a record as available for re-use by a subsequent recycle operation.
+ * Note that the record cannot be physically destroyed.
+ */
 void ck_epoch_unregister(ck_epoch_record_t *);
+
 bool ck_epoch_poll(ck_epoch_record_t *);
 void ck_epoch_synchronize(ck_epoch_record_t *);
+void ck_epoch_synchronize_wait(ck_epoch_t *, ck_epoch_wait_cb_t *, void *);
 void ck_epoch_barrier(ck_epoch_record_t *);
+void ck_epoch_barrier_wait(ck_epoch_record_t *, ck_epoch_wait_cb_t *, void *);
+
+/*
+ * Reclaim entries associated with a record. This is safe to call only on
+ * the caller's record or records that are using call_strict.
+ */
 void ck_epoch_reclaim(ck_epoch_record_t *);
 
 #endif /* CK_EPOCH_H */
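The epoch changes above are easier to read with a short, illustrative sketch: ck_epoch_end() now returns true once the caller holds no further active sections, and ck_epoch_call_strict() defers destruction through a record that may be shared between threads. The ck_* calls below match the declarations in this diff; struct node, node_destroy(), reader(), and the first-member cast are assumptions made for the example.

#include <ck_epoch.h>
#include <stdlib.h>

struct node {
	ck_epoch_entry_t entry;	/* first member, so a cast recovers the node */
	int value;
};

static void
node_destroy(ck_epoch_entry_t *e)
{

	/* Valid because the entry is the first member of struct node. */
	free((struct node *)(void *)e);
}

static void
node_retire(ck_epoch_record_t *record, struct node *n)
{

	/* Reentrancy-safe deferral through a possibly shared record. */
	ck_epoch_call_strict(record, &n->entry, node_destroy);
}

static void
reader(ck_epoch_record_t *record)
{
	ck_epoch_section_t section;

	ck_epoch_begin(record, &section);
	/* ... dereference epoch-protected objects ... */
	if (ck_epoch_end(record, &section) == true) {
		/* No active sections remain for this record, so a
		 * non-blocking reclamation attempt is reasonable. */
		ck_epoch_poll(record);
	}
}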
4 changes: 3 additions & 1 deletion third_party/concurrency_kit/ck/include/ck_hs.h
@@ -100,10 +100,11 @@ struct ck_hs_stat {
 struct ck_hs_iterator {
 	void **cursor;
 	unsigned long offset;
+	struct ck_hs_map *map;
 };
 typedef struct ck_hs_iterator ck_hs_iterator_t;
 
-#define CK_HS_ITERATOR_INITIALIZER { NULL, 0 }
+#define CK_HS_ITERATOR_INITIALIZER { NULL, 0, NULL }
 
 /* Convenience wrapper to table hash function. */
 #define CK_HS_HASH(T, F, K) F((K), (T)->seed)
@@ -112,6 +113,7 @@ typedef void *ck_hs_apply_fn_t(void *, void *);
 bool ck_hs_apply(ck_hs_t *, unsigned long, const void *, ck_hs_apply_fn_t *, void *);
 void ck_hs_iterator_init(ck_hs_iterator_t *);
 bool ck_hs_next(ck_hs_t *, ck_hs_iterator_t *, void **);
+bool ck_hs_next_spmc(ck_hs_t *, ck_hs_iterator_t *, void **);
 bool ck_hs_move(ck_hs_t *, ck_hs_t *, ck_hs_hash_cb_t *,
     ck_hs_compare_cb_t *, struct ck_malloc *);
 bool ck_hs_init(ck_hs_t *, unsigned int, ck_hs_hash_cb_t *,
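The iterator now records the map it started on, which is what lets the new ck_hs_next_spmc() tolerate a concurrent resize in single-writer, many-reader workloads. A minimal traversal sketch, assuming a populated ck_hs_t and a hypothetical process() visitor:

#include <ck_hs.h>

static void process(void *entry);	/* hypothetical visitor */

static void
visit_all(ck_hs_t *hs)
{
	ck_hs_iterator_t it = CK_HS_ITERATOR_INITIALIZER;
	void *entry;

	/* The iterator pins the map it began on via its new map field. */
	while (ck_hs_next_spmc(hs, &it, &entry) == true)
		process(entry);
}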
2 changes: 2 additions & 0 deletions third_party/concurrency_kit/ck/include/ck_pr.h
@@ -43,6 +43,8 @@
 #include "gcc/sparcv9/ck_pr.h"
 #elif defined(__ppc64__)
 #include "gcc/ppc64/ck_pr.h"
+#elif defined(__s390x__)
+#include "gcc/s390x/ck_pr.h"
 #elif defined(__ppc__)
 #include "gcc/ppc/ck_pr.h"
 #elif defined(__arm__)
2 changes: 1 addition & 1 deletion third_party/concurrency_kit/ck/include/ck_queue.h
@@ -235,7 +235,7 @@ struct {							\
  * Singly-linked Tail queue functions.
  */
 #define CK_STAILQ_CONCAT(head1, head2) do {				\
-	if ((head2)->stqh_first == NULL) {				\
+	if ((head2)->stqh_first != NULL) {				\
 		ck_pr_store_ptr((head1)->stqh_last, (head2)->stqh_first);\
 		ck_pr_fence_store();					\
 		(head1)->stqh_last = (head2)->stqh_last;		\
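The one-character fix above corrects an inverted guard: the concatenation body previously ran only when head2 was empty, so genuine concatenations were silently skipped and an empty head2 left head1's tail pointer aimed into head2. A hedged sketch of the intended use, with a hypothetical item type:

#include <ck_queue.h>

struct item {
	int value;
	CK_STAILQ_ENTRY(item) link;
};

CK_STAILQ_HEAD(item_head, item);

static void
merge_into(struct item_head *dst, struct item_head *src)
{

	/* With the corrected guard, an empty src leaves dst untouched,
	 * while a non-empty src is spliced onto dst's tail. */
	CK_STAILQ_CONCAT(dst, src);
}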
51 changes: 41 additions & 10 deletions third_party/concurrency_kit/ck/include/ck_ring.h
@@ -176,23 +176,54 @@ _ck_ring_enqueue_mp(struct ck_ring *ring,
 
 	producer = ck_pr_load_uint(&ring->p_head);
 
-	do {
+	for (;;) {
 		/*
-		 * The snapshot of producer must be up to date with
-		 * respect to consumer.
+		 * The snapshot of producer must be up to date with respect to
+		 * consumer.
 		 */
 		ck_pr_fence_load();
 		consumer = ck_pr_load_uint(&ring->c_head);
 
 		delta = producer + 1;
-		if (CK_CC_UNLIKELY((delta & mask) == (consumer & mask))) {
-			r = false;
-			goto leave;
+
+		/*
+		 * Only try to CAS if the producer is not clearly stale (not
+		 * less than consumer) and the buffer is definitely not full.
+		 */
+		if (CK_CC_LIKELY((producer - consumer) < mask)) {
+			if (ck_pr_cas_uint_value(&ring->p_head,
+			    producer, delta, &producer) == true) {
+				break;
+			}
+		} else {
+			unsigned int new_producer;
+
+			/*
+			 * Slow path. Either the buffer is full or we have a
+			 * stale snapshot of p_head. Execute a second read of
+			 * p_read that must be ordered wrt the snapshot of
+			 * c_head.
+			 */
+			ck_pr_fence_load();
+			new_producer = ck_pr_load_uint(&ring->p_head);
+
+			/*
+			 * Only fail if we haven't made forward progress in
+			 * production: the buffer must have been full when we
+			 * read new_producer (or we wrapped around UINT_MAX
+			 * during this iteration).
+			 */
+			if (producer == new_producer) {
+				r = false;
+				goto leave;
+			}
+
+			/*
+			 * p_head advanced during this iteration. Try again.
+			 */
+			producer = new_producer;
 		}
-	} while (ck_pr_cas_uint_value(&ring->p_head,
-	    producer,
-	    delta,
-	    &producer) == false);
+	}
 
 	buffer = (char *)buffer + ts * (producer & mask);
 	memcpy(buffer, entry, ts);
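The rewritten loop splits the old single CAS-per-iteration scheme into a fast path and a slow path that distinguishes a stale p_head snapshot from a genuinely full ring. The full-buffer test itself relies on unsigned wrap-around arithmetic: producer - consumer yields the in-flight distance even after the 32-bit counters overflow. A small standalone check of that arithmetic, assuming a ring of size 8 (mask 7):

#include <assert.h>
#include <limits.h>

int
main(void)
{
	const unsigned int mask = 7;	/* ring size 8, usable capacity 7 */

	/* Three entries in flight: fast path, the CAS is attempted. */
	assert((8u - 5u) < mask);

	/* Producer leads by mask: ring is full, slow path re-reads p_head. */
	assert(!((12u - 5u) < mask));

	/* Wrap-around: the producer counter overflowed past UINT_MAX back
	 * to 0 while the consumer sits at UINT_MAX; distance is still 1. */
	assert((0u - UINT_MAX) < mask);
	return 0;
}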