#include "test.h"
#if INCLUDE_TIMER_TEST
#include <pjlib.h>
#define LOOP 16
#define MIN_COUNT 250
#define MAX_COUNT (LOOP * MIN_COUNT)
#define MIN_DELAY 2
#define D (MAX_COUNT / 32000)
#define DELAY (D < MIN_DELAY ? MIN_DELAY : D)
#define THIS_FILE "timer_test"
/* NOTE(review): orphan brace pair -- the declaration that owned this empty
 * body was lost when this listing was extracted. Judging by the later
 * statement `entry[i].cb = &timer_callback;`, this is most likely the
 * no-op `static void timer_callback(...)` used by test_timer_heap().
 * Restore the signature from the upstream source before compiling. */
{
}
/*
 * Basic timer heap test: schedule MIN_COUNT..MAX_COUNT entries (doubling
 * each of LOOP iterations), then poll/cancel them and report per-loop
 * statistics. Returns 0 on success, a negative code on failure.
 *
 * NOTE(review): this listing is damaged by extraction -- pool creation,
 * timer-heap creation, the schedule/cancel/poll calls and several
 * variable declarations (pool, size, entry, status, t_sched, t_cancel,
 * t_poll) are missing. Do not trust control flow here; compare against
 * upstream pjlib-test/timer.c.
 */
static int test_timer_heap(void)
{
int i, j;
int err=0;
unsigned count;
PJ_LOG(3,(
"test",
"...Basic test"));
/* Pool allocation failure is fatal for the whole test. */
if (!pool) {
PJ_LOG(3,(
"test",
"...error: unable to create pool of %u bytes",
size));
return -10;
}
if (!entry)
return -20;
/* Point every entry at the shared (no-op) callback. */
for (i=0; i<MAX_COUNT; ++i) {
entry[i].
cb = &timer_callback;
}
app_perror("...error: unable to create timer heap", status);
return -30;
}
/* Run LOOP passes, doubling the entry count each time. */
count = MIN_COUNT;
for (i=0; i<LOOP; ++i) {
int early = 0;
int cancelled=0;
int rc;
/* NOTE(review): rc is read before any visible assignment below --
 * the scheduling call that set it was lost in extraction. */
for (j=0; j<(int)count; ++j) {
if (rc != 0)
return -40;
if (rc > 0) {
early += rc;
}
}
do {
if (rc > 0) {
cancelled += rc;
}
/* Symbian has no timer-heap polling thread; drive the active
 * scheduler manually instead. */
#if defined(PJ_SYMBIAN) && PJ_SYMBIAN!=0
rc = 0;
++rc;
#else
#endif
if (rc > 0) {
}
PJ_LOG(3, (THIS_FILE,
"ERROR: %d timers left",
++err;
}
/* Average the accumulated timestamps over the entry count. */
t_cancel.
u32.
lo /= count;
"...ok (count:%d, early:%d, cancelled:%d, "
"sched:%d, cancel:%d poll:%d)",
count, early, cancelled, t_sched.
u32.
lo, t_cancel.
u32.
lo,
count = count * 2;
if (count > MAX_COUNT)
break;
}
return err;
}
#define RANDOMIZED_TEST 1
#define SIMULATE_CRASH PJ_TIMER_USE_COPY
#if RANDOMIZED_TEST
#define ST_STRESS_THREAD_COUNT 20
#define ST_POLL_THREAD_COUNT 0
#define ST_CANCEL_THREAD_COUNT 0
#else
#define ST_STRESS_THREAD_COUNT 0
#define ST_POLL_THREAD_COUNT 10
#define ST_CANCEL_THREAD_COUNT 10
#endif
#define ST_ENTRY_COUNT 10000
#define ST_DURATION 30000
#define ST_ENTRY_MAX_TIMEOUT_MS ST_DURATION/10
#define ST_ENTRY_GROUP_LOCK_COUNT 1
#define BT_ENTRY_COUNT 100000
#define BT_ENTRY_SHOW_START 100
#define BT_ENTRY_SHOW_MULT 10
#define BT_REPEAT_RANDOM_TEST 4
#define BT_REPEAT_INC_TEST 4
/* NOTE(review): the following four fragments lost their headers during
 * extraction. From context they appear to be (1) the tail of the stress
 * test's thread_param_t struct, (2) the body of st_schedule_entry(),
 * (3) an empty helper body, and (4) the body of st_entry_callback().
 * Confirm against upstream pjlib-test/timer.c. */
{
int err;
/* Per-thread call counters for the poll/cancel worker threads. */
struct {
unsigned cnt;
} stat[ST_POLL_THREAD_COUNT + ST_CANCEL_THREAD_COUNT + 1];
};
{
/* ~90% of schedules use one of the shared group locks (when group
 * locks are enabled); the rest schedule without one. */
if (ST_ENTRY_GROUP_LOCK_COUNT &&
pj_rand() % 10) {
grp_lock = tparam->grp_locks[
pj_rand() % ST_ENTRY_GROUP_LOCK_COUNT];
}
return status;
}
{
}
{
#if RANDOMIZED_TEST
#endif
/* Only auto-reschedule from the callback when the dedicated stress
 * threads are not doing the scheduling themselves. */
if (!ST_STRESS_THREAD_COUNT)
st_schedule_entry(ht, e);
}
/*
 * Randomized stress worker thread: on each iteration picks a task by
 * weighted probability (75% schedule, 15% cancel, 5% poll, 5% nothing)
 * and applies it to a random entry, detecting benign races vs. real
 * errors by comparing the entry's previous status with the outcome.
 *
 * NOTE(review): extraction dropped several statements (t_idx/job/idx/
 * count assignments, the cancel call whose arguments remain at the
 * `&tparam->entries[idx], 10);` line, and some closing conditions).
 */
static int stress_worker(void *arg)
{
/* Task codes selected by the weighted random draw below. */
enum {
SCHEDULING = 0,
CANCELLING = 1,
POLLING = 2,
NOTHING = 3
};
int prob[3] = {75, 15, 5};
int t_idx, i;
PJ_LOG(4,(
"test",
"...thread #%d (random) started", t_idx));
while (!tparam->stopping) {
int job, task;
int idx, count;
/* Map the random draw onto a task via cumulative probabilities. */
if (job < prob[0]) task = SCHEDULING;
else if (job < (prob[0] + prob[1])) task = CANCELLING;
else if (job < (prob[0] + prob[1] + prob[2])) task = POLLING;
else task = NOTHING;
if (task == SCHEDULING) {
if (prev_status != 0) continue;
status = st_schedule_entry(tparam->timer, &tparam->entries[idx]);
PJ_LOG(3,(
"test",
"race schedule-schedule %d: %p",
idx, &tparam->entries[idx]));
} else {
/* NOTE(review): `!= 0` looks inverted -- recording the FIRST
 * error would require `== 0`. Verify against upstream. */
if (tparam->err != 0) tparam->err = -210;
PJ_LOG(3,(
"test",
"error: failed to schedule entry %d: %p",
idx, &tparam->entries[idx]));
}
}
else if (prev_status == 1 && status ==
PJ_SUCCESS) {
PJ_LOG(3,(
"test",
"race schedule-cancel/poll %d: %p",
idx, &tparam->entries[idx]));
}
}
} else if (task == CANCELLING) {
&tparam->entries[idx], 10);
if (prev_status == 0 && count > 0) {
PJ_LOG(3,(
"test",
"race cancel-schedule %d: %p",
idx, &tparam->entries[idx]));
} else {
/* NOTE(review): same suspected `!= 0` / `== 0` inversion. */
if (tparam->err != 0) tparam->err = -220;
PJ_LOG(3,(
"test",
"error: cancelling invalid entry %d: %p",
idx, &tparam->entries[idx]));
}
} else if (prev_status == 1 && count == 0) {
PJ_LOG(3,(
"test",
"race cancel-poll %d: %p",
idx, &tparam->entries[idx]));
} else {
if (tparam->err != 0) tparam->err = -230;
PJ_LOG(3,(
"test",
"error: failed to cancel entry %d: %p",
idx, &tparam->entries[idx]));
}
}
if (count > 0) {
}
} else if (task == POLLING) {
for (i = 0; i < count; i++) {
}
} else {
}
}
PJ_LOG(4,(
"test",
"...thread #%d (poll) stopped", t_idx));
return 0;
}
/*
 * Poll worker thread: repeatedly polls the timer heap until the test
 * signals `stopping`, accumulating the number of fired entries into its
 * per-thread stat slot.
 *
 * NOTE(review): the assignments of `idx` (from arg) and `count` (from
 * the poll call) were lost in extraction.
 */
static int poll_worker(void *arg)
{
int idx;
/* Mark this stat slot as belonging to a poller (vs. canceller). */
tparam->stat[idx].is_poll =
PJ_TRUE;
PJ_LOG(4,(
"test",
"...thread #%d (poll) started", idx));
while (!tparam->stopping) {
unsigned count;
if (count > 0) {
PJ_LOG(5,(
"test",
"...thread #%d called %d entries",
idx, count));
tparam->stat[idx].cnt += count;
} else {
}
}
PJ_LOG(4,(
"test",
"...thread #%d (poll) stopped", idx));
return 0;
}
/*
 * Cancel worker thread: repeatedly cancels a random entry and, when the
 * cancellation succeeded, immediately re-schedules it so the heap stays
 * populated. Counts cancellations in its per-thread stat slot.
 *
 * NOTE(review): the assignments of `idx`, `e` and `count` (the cancel
 * call) were lost in extraction.
 */
static int cancel_worker(void *arg)
{
int idx;
PJ_LOG(4,(
"test",
"...thread #%d (cancel) started", idx));
while (!tparam->stopping) {
int count;
if (count > 0) {
PJ_LOG(5,(
"test",
"...thread #%d cancelled %d entries",
idx, count));
tparam->stat[idx].cnt += count;
/* Keep the heap full: put the cancelled entry back in. */
st_schedule_entry(tparam->timer, e);
}
}
PJ_LOG(4,(
"test",
"...thread #%d (cancel) stopped", idx));
return 0;
}
/*
 * Multithreaded stress test: creates a timer heap plus stress/poll/cancel
 * worker threads (mix controlled by RANDOMIZED_TEST), runs them for
 * ST_DURATION, then tears everything down and verifies the accounting
 * invariant scheduled == cancelled + polled + remaining. Uses the
 * idiomatic goto-based cleanup pattern; returns 0 on success.
 *
 * NOTE(review): extraction removed many statements (pool/timer/lock/
 * group-lock/atomic creation calls, the calloc calls whose trailing
 * `sizeof(*...)` arguments remain, pj_thread_create calls, the run/sleep
 * phase, thread joins and destroys, and the `match` computation). Compare
 * against upstream pjlib-test/timer.c before trusting any branch.
 */
static int timer_stress_test(void)
{
unsigned count = 0, n_sched = 0, n_cancel = 0, n_poll = 0;
int i;
int err=0;
#if SIMULATE_CRASH
#endif
PJ_LOG(3,(
"test",
"...Stress test"));
if (!pool) {
PJ_LOG(3,(
"test",
"...error: unable to create pool"));
err = -10;
goto on_return;
}
app_perror("...error: unable to create timer heap", status);
err = -20;
goto on_return;
}
app_perror("...error: unable to create lock", status);
err = -30;
goto on_return;
}
if (ST_ENTRY_GROUP_LOCK_COUNT) {
tparam.grp_locks = grp_locks;
}
for (i=0; i<ST_ENTRY_GROUP_LOCK_COUNT; ++i) {
app_perror("...error: unable to create group lock", status);
err = -40;
goto on_return;
}
}
sizeof(*entries));
if (!entries) {
err = -50;
goto on_return;
}
sizeof(*entries_status));
if (!entries_status) {
err = -55;
goto on_return;
}
/* Initialize every entry; pre-schedule them only when the stress
 * threads won't be doing the scheduling themselves. */
for (i=0; i<ST_ENTRY_COUNT; ++i) {
err = -60;
goto on_return;
}
if (!ST_STRESS_THREAD_COUNT) {
status = st_schedule_entry(timer, &entries[i]);
app_perror("...error: unable to schedule entry", status);
err = -60;
goto on_return;
}
}
}
tparam.timer = timer;
tparam.entries = entries;
tparam.status = entries_status;
app_perror("...error: unable to create atomic", status);
err = -70;
goto on_return;
}
/* Spawn the worker threads (creation calls lost in extraction). */
if (ST_STRESS_THREAD_COUNT) {
}
for (i=0; i<ST_STRESS_THREAD_COUNT; ++i) {
0, 0, &stress_threads[i]);
app_perror("...error: unable to create stress thread", status);
err = -75;
goto on_return;
}
}
if (ST_POLL_THREAD_COUNT) {
}
for (i=0; i<ST_POLL_THREAD_COUNT; ++i) {
0, 0, &poll_threads[i]);
app_perror("...error: unable to create poll thread", status);
err = -80;
goto on_return;
}
}
if (ST_CANCEL_THREAD_COUNT) {
}
for (i=0; i<ST_CANCEL_THREAD_COUNT; ++i) {
0, 0, &cancel_threads[i]);
app_perror("...error: unable to create cancel thread", status);
err = -90;
goto on_return;
}
}
/* Deliberately leak an uncancelled entry to exercise the heap's
 * behavior when an entry is released while still scheduled. */
#if SIMULATE_CRASH
PJ_LOG(3,(
"test",
"...Releasing timer entry %p without cancelling it",
entry));
#endif
on_return:
PJ_LOG(3,(
"test",
"...Cleaning up resources"));
for (i=0; i<ST_STRESS_THREAD_COUNT; ++i) {
if (!stress_threads[i])
continue;
}
for (i=0; i<ST_POLL_THREAD_COUNT; ++i) {
if (!poll_threads[i])
continue;
}
for (i=0; i<ST_CANCEL_THREAD_COUNT; ++i) {
if (!cancel_threads[i])
continue;
}
/* Report per-thread throughput for the poll/cancel workers. */
for (i=0; i<ST_POLL_THREAD_COUNT+ST_CANCEL_THREAD_COUNT; ++i) {
PJ_LOG(3,(
"test",
"...Thread #%d (%s) executed %d entries",
i, (tparam.stat[i].is_poll? "poll":"cancel"),
tparam.stat[i].cnt));
}
for (i=0; i<ST_ENTRY_COUNT; ++i) {
if (entries_status)
}
/* Group locks must end with exactly one reference (ours). */
for (i=0; i<ST_ENTRY_GROUP_LOCK_COUNT; ++i) {
pj_assert(!
"Group lock ref count must be equal to 1");
if (!err) err = -100;
}
}
if (timer)
PJ_LOG(3,(
"test",
"Total memory of timer heap: %d",
if (tparam.idx)
if (tparam.n_sched) {
PJ_LOG(3,(
"test",
"Total number of scheduled entries: %d", n_sched));
}
if (tparam.n_cancel) {
PJ_LOG(3,(
"test",
"Total number of cancelled entries: %d", n_cancel));
}
if (tparam.n_poll) {
PJ_LOG(3,(
"test",
"Total number of polled entries: %d", n_poll));
}
PJ_LOG(3,(
"test",
"Number of remaining active entries: %d", count));
/* Accounting check: everything scheduled must be accounted for as
 * cancelled, polled, or still active. */
if (n_sched) {
#if SIMULATE_CRASH
n_sched++;
#endif
if (n_sched != (n_cancel + n_poll + count)) {
/* NOTE(review): `!= 0` looks inverted (first-error recording
 * would use `== 0`); verify against upstream. */
if (tparam.err != 0) tparam.err = -250;
}
PJ_LOG(3,(
"test",
"Scheduled = cancelled + polled + remaining?: %s",
(match? "yes": "no")));
}
return (err? err: tparam.err);
}
/*
 * Return a random msec delay for the benchmark's RANDOM_SCH scenario.
 *
 * NOTE(review): the body was lost in extraction -- as written this is a
 * non-void function with no return statement (undefined behavior if the
 * value is used, and bench_test() does use it). Presumably it returned
 * something like `pj_rand() % <max timeout>`; restore from upstream.
 */
static int get_random_delay()
{
}
/* Produce the next msec delay for the benchmark's INCREMENT_SCH
 * scenario: simply one more than the previous delay. */
static int get_next_delay(int delay)
{
    int next = delay + 1;
    return next;
}
/* Benchmark scenarios: schedule vs cancel, with random or incrementing
 * delays. The *_CAN scenarios cancel what the matching *_SCH scheduled. */
typedef enum BENCH_TEST_TYPE {
RANDOM_SCH = 0,
RANDOM_CAN = 1,
INCREMENT_SCH = 2,
INCREMENT_CAN = 3
} BENCH_TEST_TYPE;

/* Map a benchmark scenario to a short label for log output. */
static char *get_test_name(BENCH_TEST_TYPE test_type) {
    if (test_type == RANDOM_SCH || test_type == INCREMENT_SCH)
        return "schedule";
    if (test_type == RANDOM_CAN || test_type == INCREMENT_CAN)
        return "cancel";
    /* Defensive default for out-of-range values. */
    return "undefined";
}
/*
 * Format "n" into "out" as a decimal string with thousands separators
 * (e.g. 1234567 -> "1,234,567") and return a pointer to the terminating
 * NUL. The caller must supply a buffer large enough for the digits,
 * separators, and terminator (a 64-char buffer is ample for 32-bit n).
 *
 * Fix: the conversion specifier was "%d" while "n" is unsigned -- a
 * signed/unsigned mismatch that is undefined behavior per the printf
 * contract and would print a '-' (breaking the grouping) for values
 * above INT_MAX. "%u" matches the argument type.
 */
static void *get_format_num(unsigned n, char *out)
{
int c;
char buf[64];
char *p;
/* Render the raw digits first. */
pj_ansi_snprintf(buf, 64, "%u", n);
/* "c" is phased so that it equals 1 exactly after each digit that
 * should be followed by a comma -- including, by construction, the
 * final digit. */
c = 2 - pj_ansi_strlen(buf) % 3;
for (p = buf; *p != 0; ++p) {
*out++ = *p;
if (c == 1) {
*out++ = ',';
}
c = (c + 1) % 3;
}
/* The loop always appends one spurious comma after the last digit;
 * back up over it and terminate the string there. */
*--out = 0;
return out;
}
/*
 * Log the throughput (entries/sec) achieved for benchmark entries
 * [start_idx, end_idx) of the given scenario, computed from the elapsed
 * timestamp t2 and the timestamp frequency.
 *
 * NOTE(review): the parameter list is truncated by extraction -- the
 * body also uses `t2`, `start_idx` and `end_idx`, which must have been
 * declared in the lost part of the signature.
 */
static void print_bench(BENCH_TEST_TYPE test_type,
pj_timestamp time_freq,
{
char start_idx_str[64];
char end_idx_str[64];
char num_req_str[64];
unsigned num_req;
/* entries/sec = freq * entries / elapsed ticks. */
num_req = (unsigned)(time_freq.u64 * (end_idx-start_idx) / t2.u64);
/* Cancel scenarios walk the entries backwards, so mirror the indices
 * for display. */
if (test_type == RANDOM_CAN || test_type == INCREMENT_CAN) {
start_idx = BT_ENTRY_COUNT - start_idx;
end_idx = BT_ENTRY_COUNT - end_idx;
}
get_format_num(start_idx, start_idx_str);
get_format_num(end_idx, end_idx_str);
get_format_num(num_req, num_req_str);
PJ_LOG(3, (THIS_FILE,
" Entries %s-%s: %s %s ent/sec",
start_idx_str, end_idx_str, get_test_name(test_type),
num_req_str));
}
/*
 * NOTE(review): this is bench_test() -- the first line(s) of its
 * signature (return type, name, and the timer/entries/freq parameters
 * visible in the calls from timer_bench_test below) were lost in
 * extraction; only the trailing `test_type` parameter survives.
 *
 * Runs one benchmark pass over BT_ENTRY_COUNT entries, either scheduling
 * (with random or incrementing delays) or cancelling them, printing
 * throughput at exponentially growing intervals. Returns 0 on success,
 * negative on error.
 */
BENCH_TEST_TYPE test_type)
{
unsigned mult = BT_ENTRY_SHOW_START;
int i, j;
/* i marks the start of the current reporting window, j the entry. */
for (i=0, j=0; j < BT_ENTRY_COUNT; ++j) {
if (test_type == RANDOM_SCH || test_type == INCREMENT_SCH) {
if (test_type == RANDOM_SCH)
delay.
msec = get_random_delay();
else
delay.
msec = get_next_delay(delay.
msec);
app_perror("...error: unable to schedule timer entry", status);
return -50;
}
} else if (test_type == RANDOM_CAN || test_type == INCREMENT_CAN) {
if (num_ent == 0) {
PJ_LOG(3, (
"test",
"...error: unable to cancel timer entry"));
return -60;
}
} else {
/* Unknown scenario value. */
return -70;
}
/* Report and widen the window each time j hits a multiple of the
 * current reporting interval. */
if (j && (j % mult) == 0) {
print_bench(test_type, freq, t1, i, j);
i = j+1;
mult *= BT_ENTRY_SHOW_MULT;
}
}
/* Flush a final partial window, if any. */
if (j > 0 && ((j-1) % mult != 0)) {
print_bench(test_type, freq, t1, i, j);
}
return 0;
}
/*
 * Benchmark driver: creates a pool, timer heap, and BT_ENTRY_COUNT
 * entries, then runs the random and incrementing schedule/cancel
 * scenarios BT_REPEAT_*_TEST times each, cleaning up via goto.
 *
 * NOTE(review): extraction removed the timestamp-frequency query, pool
 * and timer-heap creation, the entries calloc, and the declarations of
 * pool/timer/entries/freq/status. Restore from upstream before use.
 */
static int timer_bench_test(void)
{
int err=0;
int i;
PJ_LOG(3,(
"test",
"...Benchmark test"));
PJ_LOG(3,(
"test",
"...error: unable to get timestamp freq"));
err = -10;
goto on_return;
}
if (!pool) {
PJ_LOG(3,(
"test",
"...error: unable to create pool"));
err = -20;
goto on_return;
}
app_perror("...error: unable to create timer heap", status);
err = -30;
goto on_return;
}
sizeof(*entries));
if (!entries) {
err = -40;
goto on_return;
}
/* Phase 1: random delays -- schedule then cancel, repeated. */
PJ_LOG(3,(
"test",
"....random scheduling/cancelling test.."));
for (i = 0; i < BT_REPEAT_RANDOM_TEST; ++i) {
PJ_LOG(3,(
"test",
" test %d of %d..", i+1, BT_REPEAT_RANDOM_TEST));
err = bench_test(timer, entries, freq, RANDOM_SCH);
if (err < 0)
goto on_return;
err = bench_test(timer, entries, freq, RANDOM_CAN);
if (err < 0)
goto on_return;
}
/* Phase 2: monotonically increasing delays. */
PJ_LOG(3,(
"test",
"....increment scheduling/cancelling test.."));
for (i = 0; i < BT_REPEAT_INC_TEST; ++i) {
PJ_LOG(3,(
"test",
" test %d of %d..", i+1, BT_REPEAT_INC_TEST));
err = bench_test(timer, entries, freq, INCREMENT_SCH);
if (err < 0)
goto on_return;
err = bench_test(timer, entries, freq, INCREMENT_CAN);
if (err < 0)
goto on_return;
}
on_return:
PJ_LOG(3,(
"test",
"...Cleaning up resources"));
if (pool)
return err;
}
/*
 * Entry point for the timer test suite: runs the basic heap test, the
 * multithreaded stress test, and (when WITH_BENCHMARK is enabled) the
 * benchmark. Stops and returns the first non-zero failure code, or 0
 * when every stage passes.
 */
int timer_test()
{
    int status;

    status = test_timer_heap();
    if (status)
        return status;

    status = timer_stress_test();
    if (status)
        return status;

#if WITH_BENCHMARK
    status = timer_bench_test();
    if (status)
        return status;
#endif

    return 0;
}
#else
int dummy_timer_test;
#endif
pj_status_t pj_atomic_create(pj_pool_t *pool, pj_atomic_value_t initial, pj_atomic_t **atomic)
void pj_atomic_inc(pj_atomic_t *atomic_var)
pj_status_t pj_atomic_destroy(pj_atomic_t *atomic_var)
pj_atomic_value_t pj_atomic_inc_and_get(pj_atomic_t *atomic_var)
pj_atomic_value_t pj_atomic_get(pj_atomic_t *atomic_var)
void pj_atomic_set(pj_atomic_t *atomic_var, pj_atomic_value_t value)
int pj_bool_t
Definition: types.h:71
struct pj_lock_t pj_lock_t
Definition: types.h:239
struct pj_atomic_t pj_atomic_t
Definition: types.h:226
size_t pj_size_t
Definition: types.h:58
int pj_status_t
Definition: types.h:68
struct pj_thread_t pj_thread_t
Definition: types.h:236
struct pj_grp_lock_t pj_grp_lock_t
Definition: types.h:242
struct pj_timer_heap_t pj_timer_heap_t
Definition: types.h:221
@ PJ_SUCCESS
Definition: types.h:93
@ PJ_TRUE
Definition: types.h:96
@ PJ_FALSE
Definition: types.h:99
pj_status_t pj_grp_lock_add_ref(pj_grp_lock_t *grp_lock)
int pj_grp_lock_get_ref(pj_grp_lock_t *grp_lock)
pj_status_t pj_grp_lock_create(pj_pool_t *pool, const pj_grp_lock_config *cfg, pj_grp_lock_t **p_grp_lock)
pj_status_t pj_grp_lock_dec_ref(pj_grp_lock_t *grp_lock)
pj_status_t pj_lock_create_recursive_mutex(pj_pool_t *pool, const char *name, pj_lock_t **lock)
#define PJ_LOG(level, arg)
Definition: log.h:106
void pj_pool_secure_release(pj_pool_t **ppool)
pj_pool_t * pj_pool_create(pj_pool_factory *factory, const char *name, pj_size_t initial_size, pj_size_t increment_size, pj_pool_callback *callback)
void * pj_pool_calloc(pj_pool_t *pool, pj_size_t count, pj_size_t elem)
void pj_pool_safe_release(pj_pool_t **ppool)
void pj_pool_release(pj_pool_t *pool)
void pj_srand(unsigned int seed)
pj_bool_t pj_symbianos_poll(int priority, int ms_timeout)
pj_status_t pj_thread_destroy(pj_thread_t *thread)
pj_status_t pj_thread_join(pj_thread_t *thread)
pj_status_t pj_thread_create(pj_pool_t *pool, const char *thread_name, pj_thread_proc *proc, void *arg, pj_size_t stack_size, unsigned flags, pj_thread_t **thread)
pj_status_t pj_thread_sleep(unsigned msec)
pj_size_t pj_timer_heap_count(pj_timer_heap_t *ht)
pj_status_t pj_timer_heap_schedule_w_grp_lock(pj_timer_heap_t *ht, pj_timer_entry *entry, const pj_time_val *delay, int id_val, pj_grp_lock_t *grp_lock)
int pj_timer_heap_cancel_if_active(pj_timer_heap_t *ht, pj_timer_entry *entry, int id_val)
int pj_timer_heap_cancel(pj_timer_heap_t *ht, pj_timer_entry *entry)
unsigned pj_timer_heap_poll(pj_timer_heap_t *ht, pj_time_val *next_delay)
void pj_timer_heap_destroy(pj_timer_heap_t *ht)
pj_status_t pj_timer_heap_schedule(pj_timer_heap_t *ht, pj_timer_entry *entry, const pj_time_val *delay)
pj_timer_entry * pj_timer_entry_init(pj_timer_entry *entry, int id, void *user_data, pj_timer_heap_callback *cb)
pj_size_t pj_timer_heap_mem_size(pj_size_t count)
pj_status_t pj_timer_heap_create(pj_pool_t *pool, pj_size_t count, pj_timer_heap_t **ht)
void pj_timer_heap_set_lock(pj_timer_heap_t *ht, pj_lock_t *lock, pj_bool_t auto_del)
pj_status_t pj_get_timestamp_freq(pj_timestamp *freq)
pj_status_t pj_get_timestamp(pj_timestamp *ts)
void pj_sub_timestamp(pj_timestamp *t1, const pj_timestamp *t2)
Definition: os.h:1285
pj_status_t pj_gettimeofday(pj_time_val *tv)
void pj_time_val_normalize(pj_time_val *t)
#define PJ_TIME_VAL_LTE(t1, t2)
Definition: types.h:473
#define PJ_TIME_VAL_ADD(t1, t2)
Definition: types.h:483
#define pj_assert(expr)
Definition: assert.h:48
#define PJ_UNUSED_ARG(arg)
Definition: config.h:1343
long msec
Definition: types.h:402
long sec
Definition: types.h:399
pj_timer_heap_callback * cb
Definition: timer.h:118
void * user_data
Definition: timer.h:106
pj_uint32_t lo
Definition: types.h:142
struct pj_timestamp::@9 u32