summaryrefslogtreecommitdiffstats
path: root/meta/recipes-kernel/perf/perf/sort-pmuevents.py
blob: 4f841eb822616d394b4cee499d85f7415ce677b3 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
#!/usr/bin/env python3

# perf pmu-events sorting tool
#
# Copyright (C) 2021 Bruce Ashfield
#
# SPDX-License-Identifier: MIT
#

import sys
import os
import re
from collections import OrderedDict

# Validate the command line: argv[1] is the pmu-events file to sort,
# argv[2] is where the sorted result is written. Exit non-zero on any
# usage error so the calling build step fails loudly.
if len(sys.argv) < 2:
    print( "[ERROR]: input and output pmu files missing" )
    sys.exit(1)

if len(sys.argv) < 3:
    print( "[ERROR]: output pmu file missing" )
    sys.exit(1)

infile = sys.argv[1]
outfile = sys.argv[2]

if not os.path.exists(infile):
    print( "ERROR. input file does not exist: %s" % infile )
    sys.exit(1)

if os.path.exists(outfile):
    # bugfix: warn with the *output* path (previously printed infile here)
    print( "WARNING. output file will be overwritten: %s" % outfile )

# Slurp the whole generated pmu-events source so it can be re-emitted
# with the struct initializer entries in a deterministic, sorted order.
with open(infile, 'r') as file:
    data = file.read()

# Everything before the first 'struct' keyword is the file preamble
# (includes, comments) and is passed through to the output unchanged.
# All patterns are raw strings: \w, \[ and \. in plain literals are
# invalid escape sequences and trigger warnings on modern Python.
preamble_regex = re.compile( r'^(.*?)^struct', re.MULTILINE | re.DOTALL )

preamble = re.search( preamble_regex, data )
# Captures: (1) struct type name, (2) array variable name,
# (3) the initializer body between '{' and the closing '};'.
struct_block_regex = re.compile( r'^struct.*?(\w+) (.*?)\[\] = {(.*?)^};', re.MULTILINE | re.DOTALL )
# One '{ ... },' initializer entry inside a struct array body.
field_regex =  re.compile( r'{.*?},', re.MULTILINE | re.DOTALL )
# Sort keys for an entry: its .cpuid or .name designated initializer.
cpuid_regex = re.compile( r'\.cpuid = (.*?),', re.MULTILINE | re.DOTALL )
name_regex = re.compile( r'\.name = (.*?),', re.MULTILINE | re.DOTALL )

# Build a dictionary of every struct array found in the input, keyed by
# the array variable name. Each value records the struct type and a
# 'fields' dict mapping a sort key (the entry's .cpuid or .name value)
# to the full text of that initializer entry.
entry_dict = {}
for struct_type, struct_name, struct_body in re.findall( struct_block_regex, data ):
    record = {'type': struct_type, 'fields': {}}
    entry_dict[struct_name] = record
    for field in re.findall( field_regex, struct_body ):
        # An entry may carry both keys; later matches overwrite earlier
        # ones for the same key text, matching the original behavior.
        for key_regex in (cpuid_regex, name_regex):
            key = re.search( key_regex, field )
            if key:
                record['fields'][key.group(1)] = field

        # Keyless entry and nothing collected yet: park it under '0'
        # so sentinel/terminator entries are not lost.
        if not record['fields']:
            record['fields']['0'] = field

# Rebuild the captured structs as an OrderedDict, iterating the keys in
# sorted() order at every level. Read order is irrelevant -- only the
# sorted emission order matters -- which is why this is a separate pass
# rather than being folded into the parse loop above.
entry_dict_sorted = OrderedDict(
    (name, {
        'type': entry_dict[name]['type'],
        'fields': {key: entry_dict[name]['fields'][key]
                   for key in sorted(entry_dict[name]['fields'])},
    })
    for name in sorted(entry_dict)
)

# Emit the untouched preamble followed by each struct array with its
# entries in sorted order. The preamble is also echoed to stdout, as
# the original script did. A context manager guarantees the output
# file is flushed and closed even if a write fails mid-way.
print( preamble.group(1) )
with open( outfile, 'w' ) as outf:
    outf.write( preamble.group(1) )
    for d in entry_dict_sorted:
        outf.write( "struct %s %s[] = {\n" % (entry_dict_sorted[d]['type'],d) )
        for f in entry_dict_sorted[d]['fields']:
            outf.write( entry_dict_sorted[d]['fields'][f] + '\n' )

        outf.write( "};\n" )