
fix unit to ms in benchmark csv file

gaotingquan, 1 year ago
parent
commit 990eb14903
2 files changed, 19 insertions and 19 deletions
  1. docs/module_usage/instructions/benchmark.md (+13 −13)
  2. paddlex/inference/utils/benchmark.py (+6 −6)

docs/module_usage/instructions/benchmark.md (+13 −13)

@@ -30,23 +30,23 @@ python main.py \
 +----------------+-----------------+-----------------+------------------------+
 |   Component    | Total Time (ms) | Number of Calls | Avg Time Per Call (ms) |
 +----------------+-----------------+-----------------+------------------------+
-|    ReadCmp     |   102.39458084  |        10       |      10.23945808       |
-|     Resize     |   11.20400429   |        20       |       0.56020021       |
-|   Normalize    |   34.11078453   |        20       |       1.70553923       |
-|   ToCHWImage   |    0.05555153   |        20       |       0.00277758       |
-|    Copy2GPU    |    9.10568237   |        10       |       0.91056824       |
-|     Infer      |   98.22225571   |        10       |       9.82222557       |
-|    Copy2CPU    |   14.30845261   |        10       |       1.43084526       |
-| DetPostProcess |    0.45251846   |        20       |       0.02262592       |
+|    ReadCmp     |   100.20136833  |        10       |      10.02013683       |
+|     Resize     |   17.05980301   |        20       |       0.85299015       |
+|   Normalize    |   45.44949532   |        20       |       2.27247477       |
+|   ToCHWImage   |    0.03671646   |        20       |       0.00183582       |
+|    Copy2GPU    |   12.28785515   |        10       |       1.22878551       |
+|     Infer      |   76.59482956   |        10       |       7.65948296       |
+|    Copy2CPU    |    0.39863586   |        10       |       0.03986359       |
+| DetPostProcess |    0.43916702   |        20       |       0.02195835       |
 +----------------+-----------------+-----------------+------------------------+
 +-------------+-----------------+---------------------+----------------------------+
 |    Stage    | Total Time (ms) | Number of Instances | Avg Time Per Instance (ms) |
 +-------------+-----------------+---------------------+----------------------------+
-|  PreProcess |   147.76492119  |          20         |         7.38824606         |
-|  Inference  |   121.63639069  |          20         |         6.08181953         |
-| PostProcess |    0.45251846   |          20         |         0.02262592         |
-|   End2End   |   294.03519630  |          20         |        14.70175982         |
-|    WarmUp   |  7937.82591820  |          5          |       1587.56518364        |
+|  PreProcess |   162.74738312  |          20         |         8.13736916         |
+|  Inference  |   89.28132057   |          20         |         4.46406603         |
+| PostProcess |    0.43916702   |          20         |         0.02195835         |
+|   End2End   |    0.27992606   |          20         |         0.01399630         |
+|    WarmUp   |    5.37562728   |          5          |         1.07512546         |
 +-------------+-----------------+---------------------+----------------------------+
 ```
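
The per-call averages in these tables are simply the totals divided by the call counts, with both now reported in milliseconds. A quick sanity check in plain Python (hypothetical helper, not part of PaddleX; the numbers are copied from the updated table above):

```python
# The "Avg Time Per Call (ms)" column should equal Total Time (ms) / Number of Calls.
rows = [
    ("ReadCmp", 100.20136833, 10),
    ("Infer", 76.59482956, 10),
    ("DetPostProcess", 0.43916702, 20),
]
for name, total_ms, calls in rows:
    print(f"{name}: {total_ms / calls:.8f} ms per call")
```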
 

paddlex/inference/utils/benchmark.py (+6 −6)

@@ -68,16 +68,16 @@ class Benchmark:
                 for name, sub_cmp in cmp.sub_cmps.items():
                     times = sub_cmp.timer.logs
                     counts = len(times)
-                    avg = np.mean(times)
-                    total = np.sum(times)
+                    avg = np.mean(times) * 1000
+                    total = np.sum(times) * 1000
                     detail.append((name, total, counts, avg))
                     summary["inference"] += total
                 op_tag = "postprocess"
             else:
                 times = cmp.timer.logs
                 counts = len(times)
-                avg = np.mean(times)
-                total = np.sum(times)
+                avg = np.mean(times) * 1000
+                total = np.sum(times) * 1000
                 detail.append((name, total, counts, avg))
                 summary[op_tag] += total
 
@@ -126,7 +126,7 @@ class Benchmark:
         table = PrettyTable(detail_head)
         table.add_rows(
             [
-                (name, f"{total * 1000:.8f}", cnts, f"{avg * 1000:.8f}")
+                (name, f"{total:.8f}", cnts, f"{avg:.8f}")
                 for name, total, cnts, avg in detail
             ]
         )
@@ -141,7 +141,7 @@ class Benchmark:
         table = PrettyTable(summary_head)
         table.add_rows(
             [
-                (name, f"{total * 1000:.8f}", cnts, f"{avg * 1000:.8f}")
+                (name, f"{total:.8f}", cnts, f"{avg:.8f}")
                 for name, total, cnts, avg in summary
             ]
         )
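
Taken together, the change moves the seconds-to-milliseconds conversion from the formatting step into the aggregation step, so the collected totals and averages are already in ms when the tables (and the CSV) are written. Below is a minimal sketch of that pattern, using illustrative names rather than the actual PaddleX benchmark classes:

```python
import time

import numpy as np
from prettytable import PrettyTable

# Sketch only: per-call durations are recorded in seconds, converted to
# milliseconds once at aggregation time, and the table then formats the
# already-converted numbers directly.
timer_logs = []                              # per-call durations in seconds

for _ in range(10):
    start = time.perf_counter()
    time.sleep(0.01)                         # stand-in for the real component call
    timer_logs.append(time.perf_counter() - start)

counts = len(timer_logs)
avg = np.mean(timer_logs) * 1000             # seconds -> ms, done once here
total = np.sum(timer_logs) * 1000
detail = [("Infer", total, counts, avg)]

table = PrettyTable(
    ["Component", "Total Time (ms)", "Number of Calls", "Avg Time Per Call (ms)"]
)
table.add_rows(
    [(name, f"{t:.8f}", c, f"{a:.8f}") for name, t, c, a in detail]
)
print(table)
```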