Function report

Linux Kernel v5.5.9

Source Code: lib/rbtree_test.c

Name: rbtree_test_init

Proto: static int __init rbtree_test_init(void)

Type: int

Parameter: Nothing

248  nodes = kmalloc_array() - allocate memory for an array: n elements of the given element size, with flags selecting the type of memory to allocate (see kmalloc)
249  If nodes is NULL, return -ENOMEM
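The allocation and its failure check follow the standard kernel pattern. A minimal sketch, assuming a struct test_node layout with an ordering key plus the val/augmented fields the augmented tests below rely on (the exact layout and the helper name alloc_nodes are assumptions, not verbatim source):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

struct test_node {
        u32 key;                /* ordering key */
        struct rb_node rb;      /* linkage into the tree */
        u32 val;                /* payload used by the augmented tests */
        u32 augmented;          /* cached maximum of val over the subtree */
};

static struct test_node *nodes;

static int __init alloc_nodes(int nnodes)
{
        /*
         * kmalloc_array() checks nnodes * sizeof(*nodes) for overflow,
         * which an open-coded kmalloc(nnodes * size, ...) would not.
         */
        nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
        return nodes ? 0 : -ENOMEM;
}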
252  printk(KERN_ALERT "rbtree testing") - KERN_ALERT marks messages where action must be taken immediately
254  prandom_seed_state() - set the seed for prandom_u32_state(); takes a pointer to the state structure and an arbitrary 64-bit seed value
255  init()
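Lines 254-255 make the workload reproducible: the private prandom state is seeded once with a fixed 64-bit constant, so every run times the same key sequence. A plausible body for init(), assuming the nodes array and nnodes count from above (the rnd name and the field assignments are assumptions):

#include <linux/random.h>

static struct rnd_state rnd;    /* seeded once via prandom_seed_state() */

static void init(void)
{
        int i;

        /* refill every node with a fresh pseudo-random key/val pair */
        for (i = 0; i < nnodes; i++) {
                nodes[i].key = prandom_u32_state(&rnd);
                nodes[i].val = prandom_u32_state(&rnd);
        }
}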
257  time1 = get_cycles()
259  For i < perf_loops:
260  For j < nnodes: insert(nodes + j, & root)
262  For j < nnodes: erase(nodes + j, & root)
266  time2 = get_cycles()
267  time = time2 - time1
269  time = div_u64(time, perf_loops) - unsigned 64-bit divide with 32-bit divisor; averages the elapsed cycles over the perf_loops rounds
270  printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n", (unsigned long long)time)
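The timed section at lines 257-270 is the template every later test repeats: snapshot the cycle counter, run perf_loops identical rounds, then divide to get a per-round average. A sketch, assuming insert()/erase(), root, nnodes and perf_loops are the module-level helpers and variables this report refers to:

#include <linux/kernel.h>       /* printk() */
#include <linux/math64.h>       /* div_u64() */
#include <linux/timex.h>        /* get_cycles(), cycles_t */

static void time_insert_erase(void)
{
        cycles_t time1, time2, time;
        int i, j;

        time1 = get_cycles();

        /* perf_loops rounds of: insert every node, then erase every node */
        for (i = 0; i < perf_loops; i++) {
                for (j = 0; j < nnodes; j++)
                        insert(nodes + j, &root);
                for (j = 0; j < nnodes; j++)
                        erase(nodes + j, &root);
        }

        time2 = get_cycles();

        /* average cycle cost of one full insert-all + erase-all round */
        time = div_u64(time2 - time1, perf_loops);
        printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n",
               (unsigned long long)time);
}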
273  time1 = get_cycles()
275  For i < perf_loops:
276  For j < nnodes: insert_cached(nodes + j, & root)
278  For j < nnodes: erase_cached(nodes + j, & root)
282  time2 = get_cycles()
283  time = time2 - time1
285  time = div_u64(time, perf_loops) - unsigned 64-bit divide with 32-bit divisor; averages the elapsed cycles over the perf_loops rounds
286  printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n", (unsigned long long)time)
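Test 2 exercises the rb_root_cached variants. A hedged sketch of what insert_cached() likely looks like: the descent is the same as a plain insert, but the caller tracks whether the new node ended up leftmost so rb_insert_color_cached() can maintain the cached minimum pointer:

#include <linux/rbtree.h>

static void insert_cached(struct test_node *node, struct rb_root_cached *root)
{
        struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
        bool leftmost = true;

        while (*new) {
                parent = *new;
                if (node->key < rb_entry(parent, struct test_node, rb)->key)
                        new = &parent->rb_left;
                else {
                        /* went right at least once: not the new minimum */
                        new = &parent->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&node->rb, parent, new);
        rb_insert_color_cached(&node->rb, root, leftmost);
}

The erase side would use rb_erase_cached(&node->rb, root), which likewise updates the cached leftmost pointer when the minimum is removed.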
289  For i < nnodes: insert(nodes + i, & root)
292  time1 = get_cycles()
294  For i < perf_loops:
295  For node = rb_first(); node; node = rb_next(node): empty body - the in-order walk itself is what is being timed
299  time2 = get_cycles()
300  time = time2 - time1
302  time = div_u64(time, perf_loops) - unsigned 64-bit divide with 32-bit divisor; averages the elapsed cycles over the perf_loops rounds
303  printk(" -> test 3 (latency of inorder traversal): %llu cycles\n", (unsigned long long)time)
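Test 3 measures pure pointer chasing. A sketch of the traversal presumably being timed at line 295; the loop body is empty, so only the cost of rb_first()/rb_next() is counted:

#include <linux/rbtree.h>

static void traverse_inorder(struct rb_root *root)
{
        struct rb_node *node;

        /* empty body: only the walk itself is measured */
        for (node = rb_first(root); node; node = rb_next(node))
                ;
}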
306  time1 = get_cycles()
308  For i < perf_loops: node = rb_first() - returns the first node (in sort order) of the tree
311  time2 = get_cycles()
312  time = time2 - time1
314  time = div_u64(time, perf_loops) - unsigned 64-bit divide with 32-bit divisor; averages the elapsed cycles over the perf_loops fetches
315  printk(" -> test 4 (latency to fetch first node)\n")
316  printk("        non-cached: %llu cycles\n", (unsigned long long)time)
318  time1 = get_cycles()
320  For i < perf_loops: node = rb_first_cached(& root) - same as rb_first(), but O(1)
323  time2 = get_cycles()
324  time = time2 - time1
326  time = div_u64(time, perf_loops) - unsigned 64-bit divide with 32-bit divisor; averages the elapsed cycles over the perf_loops fetches
327  printk("        cached: %llu cycles\n", (unsigned long long)time)
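Test 4 contrasts the two ways of fetching the tree minimum. rb_first() descends left-child pointers from the root, which is O(log n); rb_first_cached() returns a leftmost pointer that the _cached insert/erase variants keep up to date, which is O(1). A sketch, assuming the tree root is a struct rb_root_cached:

#include <linux/rbtree.h>

static struct rb_node *first_uncached(struct rb_root_cached *root)
{
        /* walks rb_left links down from the root: O(log n) */
        return rb_first(&root->rb_root);
}

static struct rb_node *first_cached(struct rb_root_cached *root)
{
        /* reads the maintained leftmost pointer: O(1) */
        return rb_first_cached(root);
}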
329  For i < nnodes: erase(nodes + i, & root)
333  For i < check_loops:
334  init()
335  For j < nnodes:
336  check(j)
337  insert(nodes + j, & root)
339  For j < nnodes:
340  check(nnodes - j)
341  erase(nodes + j, & root)
343  check(0)
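The correctness pass at lines 333-343 interleaves counting with the operations: before inserting node j the tree must hold exactly j nodes, before erasing node j it must hold nnodes - j, and it must be empty at the end. A sketch of the loop, assuming check(nr) verifies both the expected node count and the red-black invariants:

static void run_check_loops(void)
{
        int i, j;

        for (i = 0; i < check_loops; i++) {
                init();
                for (j = 0; j < nnodes; j++) {
                        check(j);               /* exactly j nodes present */
                        insert(nodes + j, &root);
                }
                for (j = 0; j < nnodes; j++) {
                        check(nnodes - j);      /* nnodes - j still present */
                        erase(nodes + j, &root);
                }
                check(0);                       /* tree is empty again */
        }
}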
346  printk(KERN_ALERT "augmented rbtree testing") - KERN_ALERT marks messages where action must be taken immediately
348  init()
350  time1 = get_cycles()
352  For i < perf_loops:
353  For j < nnodes: insert_augmented(nodes + j, & root)
355  For j < nnodes: erase_augmented(nodes + j, & root)
359  time2 = get_cycles()
360  time = time2 - time1
362  time = div_u64(time, perf_loops) - unsigned 64-bit divide with 32-bit divisor; averages the elapsed cycles over the perf_loops rounds
363  printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n", (unsigned long long)time)
365  time1 = get_cycles()
367  For i < perf_loops:
368  For j < nnodes: insert_augmented_cached(nodes + j, & root)
370  For j < nnodes: erase_augmented_cached(nodes + j, & root)
374  time2 = get_cycles()
375  time = time2 - time1
377  time = div_u64(time, perf_loops) - unsigned 64-bit divide with 32-bit divisor; averages the elapsed cycles over the perf_loops rounds
378  printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n", (unsigned long long)time)
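The augmented tests additionally maintain, in every node, the maximum val over that node's subtree. A hedged sketch of how this is typically wired up with the kernel's augmented-rbtree helpers; NODE_VAL and the callback name augment_callbacks are assumptions:

#include <linux/rbtree_augmented.h>

#define NODE_VAL(node) ((node)->val)

/*
 * Generates propagate/copy/rotate callbacks that keep ->augmented
 * equal to the maximum ->val within each subtree.
 */
RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
                         struct test_node, rb, u32, augmented, NODE_VAL)

static void insert_augmented(struct test_node *node,
                             struct rb_root_cached *root)
{
        struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
        u32 val = node->val;

        while (*new) {
                struct test_node *p = rb_entry(*new, struct test_node, rb);

                parent = *new;
                /* fix up subtree maxima on the way down */
                if (p->augmented < val)
                        p->augmented = val;
                new = node->key < p->key ? &parent->rb_left
                                         : &parent->rb_right;
        }

        node->augmented = val;
        rb_link_node(&node->rb, parent, new);
        rb_insert_augmented(&node->rb, &root->rb_root, &augment_callbacks);
}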
380  For i < check_loops:
381  init()
382  For j < nnodes:
383  check_augmented(j)
384  insert_augmented(nodes + j, & root)
386  For j < nnodes:
387  check_augmented(nnodes - j)
388  erase_augmented(nodes + j, & root)
390  check_augmented(0)
393  kfree(nodes)
395  Return -EAGAIN - a failing init unloads the test module immediately after the tests have run