/*
 * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
 * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "test.h"

/**
 * \page page_pjlib_timer_test Test: Timer
 *
 * This file provides implementation of \b timer_test(). It tests the
 * functionality of the timer heap.
 *
 * This file is <b>pjlib-test/timer.c</b>
 *
 * \include pjlib-test/timer.c
 */
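
/* For orientation, a minimal sketch of the pj_timer_heap API that this
 * test exercises (here `pool', `user_data' and `on_timer' are
 * placeholders, and error handling is omitted):
 *
 *     pj_timer_heap_t *ht;
 *     pj_timer_entry   entry;
 *     pj_time_val      delay = { 1, 0 };
 *
 *     pj_timer_heap_create(pool, 128, &ht);
 *     pj_timer_entry_init(&entry, 0, user_data, &on_timer);
 *     pj_timer_heap_schedule(ht, &entry, &delay);
 *     ...
 *     pj_timer_heap_poll(ht, NULL);      fire any expired entries
 *     pj_timer_heap_destroy(ht);
 */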

#if INCLUDE_TIMER_TEST

#include <pjlib.h>

#define LOOP        16
#define MIN_COUNT   250
#define MAX_COUNT   (LOOP * MIN_COUNT)
#define MIN_DELAY   2
#define D           (MAX_COUNT / 32000)
#define DELAY       (D < MIN_DELAY ? MIN_DELAY : D)
#define THIS_FILE   "timer_test"


static void timer_callback(pj_timer_heap_t *ht, pj_timer_entry *e)
{
    PJ_UNUSED_ARG(ht);
    PJ_UNUSED_ARG(e);
}
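
/* Basic test: schedule a doubling batch of entries with random delays,
 * poll and randomly cancel them, and report the average time spent in
 * the schedule, cancel, and poll operations.
 */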
static int test_timer_heap(void)
{
    int i, j;
    pj_timer_entry *entry;
    pj_pool_t *pool;
    pj_timer_heap_t *timer;
    pj_time_val delay;
    pj_status_t status;
    int err=0;
    pj_size_t size;
    unsigned count;

    PJ_LOG(3,("test", "...Basic test"));

    size = pj_timer_heap_mem_size(MAX_COUNT)+MAX_COUNT*sizeof(pj_timer_entry);
    pool = pj_pool_create( mem, NULL, size, 4000, NULL);
    if (!pool) {
        PJ_LOG(3,("test", "...error: unable to create pool of %lu bytes",
                  (unsigned long)size));
        return -10;
    }

    entry = (pj_timer_entry*)pj_pool_calloc(pool, MAX_COUNT, sizeof(*entry));
    if (!entry)
        return -20;

    for (i=0; i<MAX_COUNT; ++i) {
        entry[i].cb = &timer_callback;
    }
    status = pj_timer_heap_create(pool, MAX_COUNT, &timer);
    if (status != PJ_SUCCESS) {
        app_perror("...error: unable to create timer heap", status);
        return -30;
    }

    count = MIN_COUNT;
    for (i=0; i<LOOP; ++i) {
        int early = 0;
        //int done=0;
        int cancelled=0;
        int rc;
        pj_timestamp t1, t2, t_sched, t_cancel, t_poll;
        pj_time_val now, expire;

        pj_gettimeofday(&now);
        pj_srand(now.sec);
        t_sched.u32.lo = t_cancel.u32.lo = t_poll.u32.lo = 0;

        // Register timers
        for (j=0; j<(int)count; ++j) {
            delay.sec = pj_rand() % DELAY;
            delay.msec = pj_rand() % 1000;

            // Schedule timer
            pj_get_timestamp(&t1);
            rc = pj_timer_heap_schedule(timer, &entry[j], &delay);
            if (rc != 0)
                return -40;
            pj_get_timestamp(&t2);
            t_sched.u32.lo += (t2.u32.lo - t1.u32.lo);

            // Poll timers.
            pj_get_timestamp(&t1);
            rc = pj_timer_heap_poll(timer, NULL);
            pj_get_timestamp(&t2);
            if (rc > 0) {
                t_poll.u32.lo += (t2.u32.lo - t1.u32.lo);
                early += rc;
            }
        }

        // Set the time by which all timers should have finished
        pj_gettimeofday(&expire);
        delay.sec = DELAY;
        delay.msec = 0;
        PJ_TIME_VAL_ADD(expire, delay);

        // Wait until all timers finish, cancelling some of them along the way.
        do {
            int index = pj_rand() % count;
            pj_get_timestamp(&t1);
            rc = pj_timer_heap_cancel(timer, &entry[index]);
            pj_get_timestamp(&t2);
            if (rc > 0) {
                cancelled += rc;
                t_cancel.u32.lo += (t2.u32.lo - t1.u32.lo);
            }

            pj_gettimeofday(&now);

            pj_get_timestamp(&t1);
#if defined(PJ_SYMBIAN) && PJ_SYMBIAN!=0
            /* On Symbian, we must use OS poll (Active Scheduler poll) since
             * timer is implemented using Active Object.
             */
            rc = 0;
            while (pj_symbianos_poll(-1, 0))
                ++rc;
#else
            rc = pj_timer_heap_poll(timer, NULL);
#endif
            pj_get_timestamp(&t2);
            if (rc > 0) {
                //done += rc;
                t_poll.u32.lo += (t2.u32.lo - t1.u32.lo);
            }

        } while (PJ_TIME_VAL_LTE(now, expire) && pj_timer_heap_count(timer) > 0);

        if (pj_timer_heap_count(timer)) {
            PJ_LOG(3, (THIS_FILE, "ERROR: %lu timers left",
                       (unsigned long)pj_timer_heap_count(timer)));
            ++err;
        }
        t_sched.u32.lo /= count;
        t_cancel.u32.lo /= count;
        t_poll.u32.lo /= count;
        PJ_LOG(4, (THIS_FILE,
                   "...ok (count:%d, early:%d, cancelled:%d, "
                   "sched:%d, cancel:%d poll:%d)",
                   count, early, cancelled, t_sched.u32.lo, t_cancel.u32.lo,
                   t_poll.u32.lo));

        count = count * 2;
        if (count > MAX_COUNT)
            break;
    }

    pj_pool_release(pool);
    return err;
}

/***************
 * Stress test *
 ***************
 * Test scenario (if RANDOMIZED_TEST is 0):
 * 1. Create and schedule a number of timer entries.
 * 2. Start threads for polling (simulating normal worker threads).
 *    Each expired entry will try to cancel and re-schedule itself
 *    from within the callback.
 * 3. Start threads for cancelling random entries. Each successfully
 *    cancelled entry will be re-scheduled after some random delay.
 *
 * Test scenario (if RANDOMIZED_TEST is 1):
 * 1. Create and schedule a number of timer entries.
 * 2. Start threads which will, based on a configurable probability
 *    setting, randomly perform timer scheduling, cancelling, or
 *    polling (simulating normal worker threads).
 *
 * This test is considered a failure if:
 * - It triggers an assertion/crash.
 * - There's an error message in the log, which indicates a potential
 *   bug in the implementation (note that race messages are ok).
 */
#define RANDOMIZED_TEST 1
#define SIMULATE_CRASH  PJ_TIMER_USE_COPY

#if RANDOMIZED_TEST
    #define ST_STRESS_THREAD_COUNT  20
    #define ST_POLL_THREAD_COUNT    0
    #define ST_CANCEL_THREAD_COUNT  0
#else
    #define ST_STRESS_THREAD_COUNT  0
    #define ST_POLL_THREAD_COUNT    10
    #define ST_CANCEL_THREAD_COUNT  10
#endif
#define ST_ENTRY_COUNT              10000
#define ST_DURATION                 30000
#define ST_ENTRY_MAX_TIMEOUT_MS     (ST_DURATION/10)
/* Number of group locks (may be zero) shared by the timer entries.
 * Group locks are useful to evaluate the poll vs cancel race condition
 * scenario: each group lock must have a ref count of 1 at the end of
 * the test, otherwise an assertion will be raised.
 */
#define ST_ENTRY_GROUP_LOCK_COUNT   1

#define BT_ENTRY_COUNT          100000
#define BT_ENTRY_SHOW_START     100
#define BT_ENTRY_SHOW_MULT      10
#define BT_REPEAT_RANDOM_TEST   4
#define BT_REPEAT_INC_TEST      4

struct thread_param
{
    pj_timer_heap_t *timer;
    pj_bool_t stopping;
    pj_timer_entry *entries;
    pj_atomic_t **status;
    pj_atomic_t *n_sched, *n_cancel, *n_poll;
    pj_grp_lock_t **grp_locks;
    int err;

    pj_atomic_t *idx;
    struct {
        pj_bool_t is_poll;
        unsigned cnt;
    } stat[ST_POLL_THREAD_COUNT + ST_CANCEL_THREAD_COUNT + 1];
    /* Plus one here to avoid compile warning of zero-sized array */
};
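
/* (Re-)schedule a timer entry with a random delay of up to
 * ST_ENTRY_MAX_TIMEOUT_MS. When group locks are enabled, about 90% of
 * the entries are scheduled with one of the shared group locks.
 */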
static pj_status_t st_schedule_entry(pj_timer_heap_t *ht, pj_timer_entry *e)
{
    pj_time_val delay = {0};
    pj_grp_lock_t *grp_lock = NULL;
    pj_status_t status;
    struct thread_param *tparam = (struct thread_param *)e->user_data;

    if (ST_ENTRY_GROUP_LOCK_COUNT && pj_rand() % 10) {
        /* About 90% of entries should have group lock */
        grp_lock = tparam->grp_locks[pj_rand() % ST_ENTRY_GROUP_LOCK_COUNT];
    }

    delay.msec = pj_rand() % ST_ENTRY_MAX_TIMEOUT_MS;
    pj_time_val_normalize(&delay);
    status = pj_timer_heap_schedule_w_grp_lock(ht, e, &delay, 1, grp_lock);
    return status;
}

static void dummy_callback(pj_timer_heap_t *ht, pj_timer_entry *e)
{
    PJ_UNUSED_ARG(ht);
    PJ_LOG(4,("test", "dummy callback called %p %p", e, e->user_data));
}
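
/* Timer entry callback for the stress test: clear the entry's
 * "scheduled" flag (randomized scenario only), try to cancel the entry
 * from within its own callback, simulate some work, then reschedule it
 * (non-randomized scenario only).
 */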
static void st_entry_callback(pj_timer_heap_t *ht, pj_timer_entry *e)
{
    struct thread_param *tparam = (struct thread_param *)e->user_data;

#if RANDOMIZED_TEST
    /* Make sure the flag has been set. */
    while (pj_atomic_get(tparam->status[e - tparam->entries]) != 1)
        pj_thread_sleep(10);
    pj_atomic_set(tparam->status[e - tparam->entries], 0);
#endif

    /* try to cancel this */
    pj_timer_heap_cancel_if_active(ht, e, 10);

    /* busy doing something */
    pj_thread_sleep(pj_rand() % 50);

    /* reschedule entry */
    if (!ST_STRESS_THREAD_COUNT)
        st_schedule_entry(ht, e);
}

/* Randomized stress worker thread function. */
static int stress_worker(void *arg)
{
    /* Enumeration of the possible tasks. */
    enum {
        SCHEDULING = 0,
        CANCELLING = 1,
        POLLING = 2,
        NOTHING = 3
    };
    /* Probability of a certain task being chosen.
     * The first number indicates the probability of the first task,
     * the second number for the second task, and so on.
     */
    int prob[3] = {75, 15, 5};
    struct thread_param *tparam = (struct thread_param*)arg;
    int t_idx, i;

    t_idx = pj_atomic_inc_and_get(tparam->idx);
    PJ_LOG(4,("test", "...thread #%d (random) started", t_idx));
    while (!tparam->stopping) {
        int job, task;
        int idx, count;
        pj_status_t prev_status, status;

        /* Randomly choose which task to do */
        job = pj_rand() % 100;
        if (job < prob[0]) task = SCHEDULING;
        else if (job < (prob[0] + prob[1])) task = CANCELLING;
        else if (job < (prob[0] + prob[1] + prob[2])) task = POLLING;
        else task = NOTHING;

        idx = pj_rand() % ST_ENTRY_COUNT;
        prev_status = pj_atomic_get(tparam->status[idx]);

        if (task == SCHEDULING) {
            if (prev_status != 0) continue;
            status = st_schedule_entry(tparam->timer, &tparam->entries[idx]);
            if (prev_status == 0 && status != PJ_SUCCESS) {
                /* To make sure the flag has been set. */
                pj_thread_sleep(20);
                if (pj_atomic_get(tparam->status[idx]) == 1) {
                    /* Race condition with another scheduling. */
                    PJ_LOG(3,("test", "race schedule-schedule %d: %p",
                              idx, &tparam->entries[idx]));
                } else {
                    if (tparam->err == 0) tparam->err = -210;
                    PJ_LOG(3,("test", "error: failed to schedule entry %d: %p",
                              idx, &tparam->entries[idx]));
                }
            } else if (prev_status == 1 && status == PJ_SUCCESS) {
                /* Race condition with another cancellation or
                 * timer poll.
                 */
                pj_thread_sleep(20);
                PJ_LOG(3,("test", "race schedule-cancel/poll %d: %p",
                          idx, &tparam->entries[idx]));
            }
            if (status == PJ_SUCCESS) {
                pj_atomic_set(tparam->status[idx], 1);
                pj_atomic_inc(tparam->n_sched);
            }
        } else if (task == CANCELLING) {
            count = pj_timer_heap_cancel_if_active(tparam->timer,
                                                   &tparam->entries[idx], 10);
            if (prev_status == 0 && count > 0) {
                /* To make sure the flag has been set. */
                pj_thread_sleep(20);
                if (pj_atomic_get(tparam->status[idx]) == 1) {
                    /* Race condition with scheduling. */
                    PJ_LOG(3,("test", "race cancel-schedule %d: %p",
                              idx, &tparam->entries[idx]));
                } else {
                    if (tparam->err == 0) tparam->err = -220;
                    PJ_LOG(3,("test", "error: cancelling invalid entry %d: %p",
                              idx, &tparam->entries[idx]));
                }
            } else if (prev_status == 1 && count == 0) {
                /* To make sure the flag has been cleared. */
                pj_thread_sleep(20);
                if (pj_atomic_get(tparam->status[idx]) == 0) {
                    /* Race condition with polling. */
                    PJ_LOG(3,("test", "race cancel-poll %d: %p",
                              idx, &tparam->entries[idx]));
                } else {
                    if (tparam->err == 0) tparam->err = -230;
                    PJ_LOG(3,("test", "error: failed to cancel entry %d: %p",
                              idx, &tparam->entries[idx]));
                }
            }
            if (count > 0) {
                /* Make sure the flag has been set. */
                while (pj_atomic_get(tparam->status[idx]) != 1)
                    pj_thread_sleep(10);
                pj_atomic_set(tparam->status[idx], 0);
                pj_atomic_inc(tparam->n_cancel);
            }
        } else if (task == POLLING) {
            count = pj_timer_heap_poll(tparam->timer, NULL);
            for (i = 0; i < count; i++) {
                pj_atomic_inc_and_get(tparam->n_poll);
            }
        } else {
            pj_thread_sleep(10);
        }
    }
    PJ_LOG(4,("test", "...thread #%d (random) stopped", t_idx));
    return 0;
}

/* Poll worker thread function. */
static int poll_worker(void *arg)
{
    struct thread_param *tparam = (struct thread_param*)arg;
    int idx;

    idx = pj_atomic_inc_and_get(tparam->idx);
    tparam->stat[idx].is_poll = PJ_TRUE;

    PJ_LOG(4,("test", "...thread #%d (poll) started", idx));
    while (!tparam->stopping) {
        unsigned count;
        count = pj_timer_heap_poll(tparam->timer, NULL);
        if (count > 0) {
            /* Count expired entries */
            PJ_LOG(5,("test", "...thread #%d called %d entries",
                      idx, count));
            tparam->stat[idx].cnt += count;
        } else {
            pj_thread_sleep(10);
        }
    }
    PJ_LOG(4,("test", "...thread #%d (poll) stopped", idx));
    return 0;
}

#if ST_CANCEL_THREAD_COUNT
/* Cancel worker thread function. */
static int cancel_worker(void *arg)
{
    struct thread_param *tparam = (struct thread_param*)arg;
    int idx;

    idx = pj_atomic_inc_and_get(tparam->idx);
    tparam->stat[idx].is_poll = PJ_FALSE;

    PJ_LOG(4,("test", "...thread #%d (cancel) started", idx));
    while (!tparam->stopping) {
        int count;
        pj_timer_entry *e = &tparam->entries[pj_rand() % ST_ENTRY_COUNT];

        count = pj_timer_heap_cancel_if_active(tparam->timer, e, 2);
        if (count > 0) {
            /* Count cancelled entries */
            PJ_LOG(5,("test", "...thread #%d cancelled %d entries",
                      idx, count));
            tparam->stat[idx].cnt += count;

            /* Reschedule entry after some delay */
            pj_thread_sleep(pj_rand() % 100);
            st_schedule_entry(tparam->timer, e);
        }
    }
    PJ_LOG(4,("test", "...thread #%d (cancel) stopped", idx));
    return 0;
}
#endif
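
/* Stress test body: set up the timer heap, entries, atomics, and group
 * locks, run the worker threads for ST_DURATION ms, then tear everything
 * down and cross-check that scheduled == cancelled + polled + remaining.
 */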
static int timer_stress_test(void)
{
    unsigned count = 0, n_sched = 0, n_cancel = 0, n_poll = 0;
    int i;
    pj_timer_entry *entries = NULL;
    pj_atomic_t **entries_status = NULL;
    pj_grp_lock_t **grp_locks = NULL;
    pj_pool_t *pool;
    pj_timer_heap_t *timer = NULL;
    pj_lock_t *timer_lock;
    pj_status_t status;
    int err=0;
    pj_thread_t **stress_threads = NULL;
    pj_thread_t **poll_threads = NULL;
    pj_thread_t **cancel_threads = NULL;
    struct thread_param tparam = {0};
    pj_time_val now;
#if SIMULATE_CRASH
    pj_timer_entry *entry;
    pj_pool_t *tmp_pool;
    pj_time_val delay = {0};
#endif

    PJ_LOG(3,("test", "...Stress test"));

    pj_gettimeofday(&now);
    pj_srand(now.sec);

    pool = pj_pool_create( mem, NULL, 128, 128, NULL);
    if (!pool) {
        PJ_LOG(3,("test", "...error: unable to create pool"));
        err = -10;
        goto on_return;
    }

    /* Create timer heap.
     * Initially we only create a fraction of what's required,
     * to test the timer heap growth algorithm.
     */
    status = pj_timer_heap_create(pool, ST_ENTRY_COUNT/64, &timer);
    if (status != PJ_SUCCESS) {
        app_perror("...error: unable to create timer heap", status);
        err = -20;
        goto on_return;
    }

    /* Set recursive lock for the timer heap. */
    status = pj_lock_create_recursive_mutex( pool, "lock", &timer_lock);
    if (status != PJ_SUCCESS) {
        app_perror("...error: unable to create lock", status);
        err = -30;
        goto on_return;
    }
    pj_timer_heap_set_lock(timer, timer_lock, PJ_TRUE);

    /* Create group locks for the timer entries. */
    if (ST_ENTRY_GROUP_LOCK_COUNT) {
        grp_locks = (pj_grp_lock_t**)
                    pj_pool_calloc(pool, ST_ENTRY_GROUP_LOCK_COUNT,
                                   sizeof(pj_grp_lock_t*));
        tparam.grp_locks = grp_locks;
    }
    for (i=0; i<ST_ENTRY_GROUP_LOCK_COUNT; ++i) {
        status = pj_grp_lock_create(pool, NULL, &grp_locks[i]);
        if (status != PJ_SUCCESS) {
            app_perror("...error: unable to create group lock", status);
            err = -40;
            goto on_return;
        }
        pj_grp_lock_add_ref(grp_locks[i]);
    }

    /* Create and schedule timer entries */
    entries = (pj_timer_entry*)pj_pool_calloc(pool, ST_ENTRY_COUNT,
                                              sizeof(*entries));
    if (!entries) {
        err = -50;
        goto on_return;
    }
    entries_status = (pj_atomic_t**)pj_pool_calloc(pool, ST_ENTRY_COUNT,
                                                   sizeof(*entries_status));
    if (!entries_status) {
        err = -55;
        goto on_return;
    }

    for (i=0; i<ST_ENTRY_COUNT; ++i) {
        pj_timer_entry_init(&entries[i], 0, &tparam, &st_entry_callback);

        status = pj_atomic_create(pool, -1, &entries_status[i]);
        if (status != PJ_SUCCESS) {
            err = -60;
            goto on_return;
        }
        pj_atomic_set(entries_status[i], 0);

        /* For randomized test, we schedule the entry inside the thread */
        if (!ST_STRESS_THREAD_COUNT) {
            status = st_schedule_entry(timer, &entries[i]);
            if (status != PJ_SUCCESS) {
                app_perror("...error: unable to schedule entry", status);
                err = -60;
                goto on_return;
            }
        }
    }

    tparam.stopping = PJ_FALSE;
    tparam.timer = timer;
    tparam.entries = entries;
    tparam.status = entries_status;
    status = pj_atomic_create(pool, -1, &tparam.idx);
    if (status != PJ_SUCCESS) {
        app_perror("...error: unable to create atomic", status);
        err = -70;
        goto on_return;
    }
    status = pj_atomic_create(pool, -1, &tparam.n_sched);
    pj_assert (status == PJ_SUCCESS);
    pj_atomic_set(tparam.n_sched, 0);
    status = pj_atomic_create(pool, -1, &tparam.n_cancel);
    pj_assert (status == PJ_SUCCESS);
    pj_atomic_set(tparam.n_cancel, 0);
    status = pj_atomic_create(pool, -1, &tparam.n_poll);
    pj_assert (status == PJ_SUCCESS);
    pj_atomic_set(tparam.n_poll, 0);

    /* Start stress worker threads */
    if (ST_STRESS_THREAD_COUNT) {
        stress_threads = (pj_thread_t**)
                         pj_pool_calloc(pool, ST_STRESS_THREAD_COUNT,
                                        sizeof(pj_thread_t*));
    }
    for (i=0; i<ST_STRESS_THREAD_COUNT; ++i) {
        status = pj_thread_create( pool, "stress", &stress_worker, &tparam,
                                   0, 0, &stress_threads[i]);
        if (status != PJ_SUCCESS) {
            app_perror("...error: unable to create stress thread", status);
            err = -75;
            goto on_return;
        }
    }

    /* Start poll worker threads */
    if (ST_POLL_THREAD_COUNT) {
        poll_threads = (pj_thread_t**)
                       pj_pool_calloc(pool, ST_POLL_THREAD_COUNT,
                                      sizeof(pj_thread_t*));
    }
    for (i=0; i<ST_POLL_THREAD_COUNT; ++i) {
        status = pj_thread_create( pool, "poll", &poll_worker, &tparam,
                                   0, 0, &poll_threads[i]);
        if (status != PJ_SUCCESS) {
            app_perror("...error: unable to create poll thread", status);
            err = -80;
            goto on_return;
        }
    }

    /* Start cancel worker threads */
#if ST_CANCEL_THREAD_COUNT
    cancel_threads = (pj_thread_t**)
                     pj_pool_calloc(pool, ST_CANCEL_THREAD_COUNT,
                                    sizeof(pj_thread_t*));
    for (i=0; i<ST_CANCEL_THREAD_COUNT; ++i) {
        status = pj_thread_create( pool, "cancel", &cancel_worker, &tparam,
                                   0, 0, &cancel_threads[i]);
        if (status != PJ_SUCCESS) {
            app_perror("...error: unable to create cancel thread", status);
            err = -90;
            goto on_return;
        }
    }
#endif

#if SIMULATE_CRASH
    tmp_pool = pj_pool_create( mem, NULL, 4096, 128, NULL);
    pj_assert(tmp_pool);
    entry = (pj_timer_entry*)pj_pool_calloc(tmp_pool, 1, sizeof(*entry));
    pj_assert(entry);
    pj_timer_entry_init(entry, 0, &tparam, &dummy_callback);
    delay.sec = 6;
    status = pj_timer_heap_schedule(timer, entry, &delay);
    pj_assert(status == PJ_SUCCESS);
    pj_thread_sleep(1000);
    PJ_LOG(3,("test", "...Releasing timer entry %p without cancelling it",
              entry));
    pj_pool_secure_release(&tmp_pool);
    //pj_pool_release(tmp_pool);
    //pj_memset(tmp_pool, 128, 4096);
#endif

    /* Wait */
    pj_thread_sleep(ST_DURATION);

on_return:

    PJ_LOG(3,("test", "...Cleaning up resources"));
    tparam.stopping = PJ_TRUE;

    for (i=0; i<ST_STRESS_THREAD_COUNT; ++i) {
        if (!stress_threads || !stress_threads[i])
            continue;
        pj_thread_join(stress_threads[i]);
        pj_thread_destroy(stress_threads[i]);
    }
    for (i=0; i<ST_POLL_THREAD_COUNT; ++i) {
        if (!poll_threads || !poll_threads[i])
            continue;
        pj_thread_join(poll_threads[i]);
        pj_thread_destroy(poll_threads[i]);
    }
    for (i=0; i<ST_CANCEL_THREAD_COUNT; ++i) {
        if (!cancel_threads || !cancel_threads[i])
            continue;
        pj_thread_join(cancel_threads[i]);
        pj_thread_destroy(cancel_threads[i]);
    }

    for (i=0; i<ST_POLL_THREAD_COUNT+ST_CANCEL_THREAD_COUNT; ++i) {
        PJ_LOG(3,("test", "...Thread #%d (%s) executed %d entries",
                  i, (tparam.stat[i].is_poll? "poll":"cancel"),
                  tparam.stat[i].cnt));
    }

    for (i=0; i<ST_ENTRY_COUNT; ++i) {
        if (timer && entries)
            count += pj_timer_heap_cancel_if_active(timer, &entries[i], 10);
        if (entries_status && entries_status[i])
            pj_atomic_destroy(entries_status[i]);
    }

    for (i=0; i<ST_ENTRY_GROUP_LOCK_COUNT; ++i) {
        if (!grp_locks || !grp_locks[i])
            continue;
        /* Ref count must be equal to 1 */
        if (pj_grp_lock_get_ref(grp_locks[i]) != 1) {
            pj_assert(!"Group lock ref count must be equal to 1");
            if (!err) err = -100;
        }
        pj_grp_lock_dec_ref(grp_locks[i]);
    }

    if (timer)
        pj_timer_heap_destroy(timer);

    PJ_LOG(3,("test", "Total memory of timer heap: %lu",
              (unsigned long)pj_timer_heap_mem_size(ST_ENTRY_COUNT)));

    if (tparam.idx)
        pj_atomic_destroy(tparam.idx);
    if (tparam.n_sched) {
        n_sched = pj_atomic_get(tparam.n_sched);
        PJ_LOG(3,("test", "Total number of scheduled entries: %d", n_sched));
        pj_atomic_destroy(tparam.n_sched);
    }
    if (tparam.n_cancel) {
        n_cancel = pj_atomic_get(tparam.n_cancel);
        PJ_LOG(3,("test", "Total number of cancelled entries: %d", n_cancel));
        pj_atomic_destroy(tparam.n_cancel);
    }
    if (tparam.n_poll) {
        n_poll = pj_atomic_get(tparam.n_poll);
        PJ_LOG(3,("test", "Total number of polled entries: %d", n_poll));
        pj_atomic_destroy(tparam.n_poll);
    }
    PJ_LOG(3,("test", "Number of remaining active entries: %d", count));
    if (n_sched) {
        pj_bool_t match = PJ_TRUE;
#if SIMULATE_CRASH
        n_sched++;
#endif
        if (n_sched != (n_cancel + n_poll + count)) {
            if (tparam.err == 0) tparam.err = -250;
            match = PJ_FALSE;
        }
        PJ_LOG(3,("test", "Scheduled = cancelled + polled + remaining?: %s",
                  (match? "yes": "no")));
    }
    pj_pool_safe_release(&pool);

    return (err? err: tparam.err);
}
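
/* Helpers for the benchmark test below: entry delays for the random
 * and the incrementing scheduling patterns.
 */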
static int get_random_delay(void)
{
    return pj_rand() % BT_ENTRY_COUNT;
}

static int get_next_delay(int delay)
{
    return ++delay;
}

typedef enum BENCH_TEST_TYPE {
    RANDOM_SCH = 0,
    RANDOM_CAN = 1,
    INCREMENT_SCH = 2,
    INCREMENT_CAN = 3
} BENCH_TEST_TYPE;

static const char *get_test_name(BENCH_TEST_TYPE test_type) {
    switch (test_type) {
    case RANDOM_SCH:
    case INCREMENT_SCH:
        return "schedule";
    case RANDOM_CAN:
    case INCREMENT_CAN:
        return "cancel";
    }
    return "undefined";
}
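
/* Write n into out using comma as the thousands separator, e.g.
 * 1234567 becomes "1,234,567". The output buffer must be large enough
 * to hold the formatted string.
 */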
static void *get_format_num(unsigned n, char *out)
{
    int c;
    char buf[64];
    char *p;

    pj_ansi_snprintf(buf, 64, "%u", n);
    c = 2 - pj_ansi_strlen(buf) % 3;
    for (p = buf; *p != 0; ++p) {
        *out++ = *p;
        if (c == 1) {
            *out++ = ',';
        }
        c = (c + 1) % 3;
    }
    *--out = 0;
    return out;
}
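
/* Log the benchmark rate (entries/sec) for the batch of entries between
 * start_idx and end_idx, measured since time_start.
 */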
static void print_bench(BENCH_TEST_TYPE test_type, pj_timestamp time_freq,
                        pj_timestamp time_start, int start_idx, int end_idx)
{
    char start_idx_str[64];
    char end_idx_str[64];
    char num_req_str[64];
    unsigned num_req;
    pj_timestamp t2;

    pj_get_timestamp(&t2);
    pj_sub_timestamp(&t2, &time_start);

    num_req = (unsigned)(time_freq.u64 * (end_idx-start_idx) / t2.u64);
    if (test_type == RANDOM_CAN || test_type == INCREMENT_CAN) {
        start_idx = BT_ENTRY_COUNT - start_idx;
        end_idx = BT_ENTRY_COUNT - end_idx;
    }
    get_format_num(start_idx, start_idx_str);
    get_format_num(end_idx, end_idx_str);
    get_format_num(num_req, num_req_str);

    PJ_LOG(3, (THIS_FILE, "    Entries %s-%s: %s %s ent/sec",
               start_idx_str, end_idx_str, get_test_name(test_type),
               num_req_str));
}
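
/* Run one benchmark pass over BT_ENTRY_COUNT entries: schedule them
 * (or cancel previously scheduled ones), logging the throughput in
 * exponentially growing batches (the first after BT_ENTRY_SHOW_START
 * entries, then every BT_ENTRY_SHOW_MULT times that).
 */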
static int bench_test(pj_timer_heap_t *timer,
                      pj_timer_entry *entries,
                      pj_timestamp freq,
                      BENCH_TEST_TYPE test_type)
{
    pj_timestamp t1;
    unsigned mult = BT_ENTRY_SHOW_START;
    pj_time_val delay = { 0 };
    int i, j;

    pj_get_timestamp(&t1);

    /* Schedule or cancel BT_ENTRY_COUNT entries. */
    for (i=0, j=0; j < BT_ENTRY_COUNT; ++j) {
        pj_status_t status;

        if (test_type == RANDOM_SCH || test_type == INCREMENT_SCH) {
            if (test_type == RANDOM_SCH)
                delay.msec = get_random_delay();
            else
                delay.msec = get_next_delay(delay.msec);

            pj_timer_entry_init(&entries[j], 0, NULL, &dummy_callback);

            status = pj_timer_heap_schedule(timer, &entries[j], &delay);
            if (status != PJ_SUCCESS) {
                app_perror("...error: unable to schedule timer entry", status);
                return -50;
            }
        } else if (test_type == RANDOM_CAN || test_type == INCREMENT_CAN) {
            unsigned num_ent = pj_timer_heap_cancel(timer, &entries[j]);
            if (num_ent == 0) {
                PJ_LOG(3, ("test", "...error: unable to cancel timer entry"));
                return -60;
            }
        } else {
            return -70;
        }

        if (j && (j % mult) == 0) {
            print_bench(test_type, freq, t1, i, j);

            i = j+1;
            pj_get_timestamp(&t1);
            mult *= BT_ENTRY_SHOW_MULT;
        }
    }
    if (j > 0 && ((j-1) % mult != 0)) {
        print_bench(test_type, freq, t1, i, j);
    }
    return 0;
}
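
/* Benchmark test: measure the raw scheduling and cancellation
 * throughput of the timer heap with random and with incrementing
 * delays.
 */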
static int timer_bench_test(void)
{
    pj_pool_t *pool = NULL;
    pj_timer_heap_t *timer = NULL;
    pj_status_t status;
    int err=0;
    pj_timer_entry *entries = NULL;
    pj_timestamp freq;
    int i;

    PJ_LOG(3,("test", "...Benchmark test"));

    status = pj_get_timestamp_freq(&freq);
    if (status != PJ_SUCCESS) {
        PJ_LOG(3,("test", "...error: unable to get timestamp freq"));
        err = -10;
        goto on_return;
    }

    pool = pj_pool_create( mem, NULL, 128, 128, NULL);
    if (!pool) {
        PJ_LOG(3,("test", "...error: unable to create pool"));
        err = -20;
        goto on_return;
    }

    /* Create timer heap. */
    status = pj_timer_heap_create(pool, BT_ENTRY_COUNT/64, &timer);
    if (status != PJ_SUCCESS) {
        app_perror("...error: unable to create timer heap", status);
        err = -30;
        goto on_return;
    }

    /* Create and schedule timer entries */
    entries = (pj_timer_entry*)pj_pool_calloc(pool, BT_ENTRY_COUNT,
                                              sizeof(*entries));
    if (!entries) {
        err = -40;
        goto on_return;
    }

    PJ_LOG(3,("test", "....random scheduling/cancelling test.."));
    for (i = 0; i < BT_REPEAT_RANDOM_TEST; ++i) {
        PJ_LOG(3,("test", "    test %d of %d..", i+1, BT_REPEAT_RANDOM_TEST));
        err = bench_test(timer, entries, freq, RANDOM_SCH);
        if (err < 0)
            goto on_return;

        err = bench_test(timer, entries, freq, RANDOM_CAN);
        if (err < 0)
            goto on_return;
    }

    PJ_LOG(3,("test", "....increment scheduling/cancelling test.."));
    for (i = 0; i < BT_REPEAT_INC_TEST; ++i) {
        PJ_LOG(3,("test", "    test %d of %d..", i+1, BT_REPEAT_INC_TEST));
        err = bench_test(timer, entries, freq, INCREMENT_SCH);
        if (err < 0)
            goto on_return;

        err = bench_test(timer, entries, freq, INCREMENT_CAN);
        if (err < 0)
            goto on_return;
    }

on_return:
    PJ_LOG(3,("test", "...Cleaning up resources"));
    if (pool)
        pj_pool_safe_release(&pool);
    return err;
}
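
/* Main entry point for the timer tests, called by the test framework. */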
int timer_test()
{
    int rc;

    rc = test_timer_heap();
    if (rc != 0)
        return rc;

    rc = timer_stress_test();
    if (rc != 0)
        return rc;

#if WITH_BENCHMARK
    rc = timer_bench_test();
    if (rc != 0)
        return rc;
#else
    /* Avoid unused warning */
    PJ_UNUSED_ARG(timer_bench_test);
#endif

    return 0;
}

#else
/* To prevent warning about "translation unit is empty"
 * when this test is disabled.
 */
int dummy_timer_test;
#endif  /* INCLUDE_TIMER_TEST */