Diffstat (limited to 'tools/perf/scripts/python')
-rw-r--r--  tools/perf/scripts/python/export-to-postgresql.py |  61
-rw-r--r--  tools/perf/scripts/python/export-to-sqlite.py     |  26
-rwxr-xr-x  tools/perf/scripts/python/exported-sql-viewer.py  | 119
3 files changed, 152 insertions(+), 54 deletions(-)
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 390a351d15ea..c3eae1d77d36 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -10,6 +10,8 @@
 # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 # more details.
 
+from __future__ import print_function
+
 import os
 import sys
 import struct
@@ -199,6 +201,18 @@ import datetime
 
 from PySide.QtSql import *
 
+if sys.version_info < (3, 0):
+	def toserverstr(str):
+		return str
+	def toclientstr(str):
+		return str
+else:
+	# Assume UTF-8 server_encoding and client_encoding
+	def toserverstr(str):
+		return bytes(str, "UTF_8")
+	def toclientstr(str):
+		return bytes(str, "UTF_8")
+
 # Need to access PostgreSQL C library directly to use COPY FROM STDIN
 from ctypes import *
 libpq = CDLL("libpq.so.5")
@@ -234,12 +248,17 @@ perf_db_export_mode = True
 perf_db_export_calls = False
 perf_db_export_callchains = False
 
+def printerr(*args, **kw_args):
+	print(*args, file=sys.stderr, **kw_args)
+
+def printdate(*args, **kw_args):
+	print(datetime.datetime.today(), *args, sep=' ', **kw_args)
+
 def usage():
-	print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
-	print >> sys.stderr, "where: columns 'all' or 'branches'"
-	print >> sys.stderr, "       calls 'calls' => create calls and call_paths table"
-	print >> sys.stderr, "       callchains 'callchains' => create call_paths table"
+	printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
+	printerr("where: columns 'all' or 'branches'")
+	printerr("       calls 'calls' => create calls and call_paths table")
+	printerr("       callchains 'callchains' => create call_paths table")
 	raise Exception("Too few arguments")
 
 if (len(sys.argv) < 2):
@@ -273,7 +292,7 @@ def do_query(q, s):
 		return
 	raise Exception("Query failed: " + q.lastError().text())
 
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database...")
 
 db = QSqlDatabase.addDatabase('QPSQL')
 query = QSqlQuery(db)
@@ -506,12 +525,12 @@ do_query(query, 'CREATE VIEW samples_view AS '
 	' FROM samples')
 
-file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
-file_trailer = "\377\377"
+file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
+file_trailer = b"\377\377"
 
 def open_output_file(file_name):
 	path_name = output_dir_name + "/" + file_name
-	file = open(path_name, "w+")
+	file = open(path_name, "wb+")
 	file.write(file_header)
 	return file
@@ -526,13 +545,13 @@ def copy_output_file_direct(file, table_name):
 
 # Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
 def copy_output_file(file, table_name):
-	conn = PQconnectdb("dbname = " + dbname)
+	conn = PQconnectdb(toclientstr("dbname = " + dbname))
 	if (PQstatus(conn)):
 		raise Exception("COPY FROM STDIN PQconnectdb failed")
 	file.write(file_trailer)
 	file.seek(0)
 	sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
-	res = PQexec(conn, sql)
+	res = PQexec(conn, toclientstr(sql))
 	if (PQresultStatus(res) != 4):
 		raise Exception("COPY FROM STDIN PQexec failed")
 	data = file.read(65536)
@@ -566,7 +585,7 @@ if perf_db_export_calls:
 	call_file = open_output_file("call_table.bin")
 
 def trace_begin():
-	print datetime.datetime.today(), "Writing to intermediate files..."
+	printdate("Writing to intermediate files...")
 	# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
 	evsel_table(0, "unknown")
 	machine_table(0, 0, "unknown")
@@ -582,7 +601,7 @@ def trace_begin():
 unhandled_count = 0
 
 def trace_end():
-	print datetime.datetime.today(), "Copying to database..."
+	printdate("Copying to database...")
 	copy_output_file(evsel_file, "selected_events")
 	copy_output_file(machine_file, "machines")
 	copy_output_file(thread_file, "threads")
@@ -597,7 +616,7 @@ def trace_end():
 	if perf_db_export_calls:
 		copy_output_file(call_file, "calls")
 
-	print datetime.datetime.today(), "Removing intermediate files..."
+	printdate("Removing intermediate files...")
 	remove_output_file(evsel_file)
 	remove_output_file(machine_file)
 	remove_output_file(thread_file)
@@ -612,7 +631,7 @@ def trace_end():
 	if perf_db_export_calls:
 		remove_output_file(call_file)
 	os.rmdir(output_dir_name)
-	print datetime.datetime.today(), "Adding primary keys"
+	printdate("Adding primary keys")
 	do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
 	do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
 	do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
@@ -627,7 +646,7 @@ def trace_end():
 	if perf_db_export_calls:
 		do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
 
-	print datetime.datetime.today(), "Adding foreign keys"
+	printdate("Adding foreign keys")
 	do_query(query, 'ALTER TABLE threads '
 		'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
 		'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
@@ -663,8 +682,8 @@ def trace_end():
 		do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
 
 	if (unhandled_count):
-		print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
-	print datetime.datetime.today(), "Done"
+		printdate("Warning: ", unhandled_count, " unhandled events")
+	printdate("Done")
 
 def trace_unhandled(event_name, context, event_fields_dict):
 	global unhandled_count
@@ -674,12 +693,14 @@ def sched__sched_switch(*x):
 	pass
 
 def evsel_table(evsel_id, evsel_name, *x):
+	evsel_name = toserverstr(evsel_name)
 	n = len(evsel_name)
 	fmt = "!hiqi" + str(n) + "s"
 	value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
 	evsel_file.write(value)
 
 def machine_table(machine_id, pid, root_dir, *x):
+	root_dir = toserverstr(root_dir)
 	n = len(root_dir)
 	fmt = "!hiqiii" + str(n) + "s"
 	value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
@@ -690,6 +711,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
 	thread_file.write(value)
 
 def comm_table(comm_id, comm_str, *x):
+	comm_str = toserverstr(comm_str)
 	n = len(comm_str)
 	fmt = "!hiqi" + str(n) + "s"
 	value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
@@ -701,6 +723,9 @@ def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
 	comm_thread_file.write(value)
 
 def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
+	short_name = toserverstr(short_name)
+	long_name = toserverstr(long_name)
+	build_id = toserverstr(build_id)
 	n1 = len(short_name)
 	n2 = len(long_name)
 	n3 = len(build_id)
@@ -709,12 +734,14 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
 	dso_file.write(value)
 
 def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
+	symbol_name = toserverstr(symbol_name)
 	n = len(symbol_name)
 	fmt = "!hiqiqiqiqiii" + str(n) + "s"
 	value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
 	symbol_file.write(value)
 
 def branch_type_table(branch_type, name, *x):
+	name = toserverstr(name)
 	n = len(name)
 	fmt = "!hiii" + str(n) + "s"
 	value = struct.pack(fmt, 2, 4, branch_type, n, name)
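Note on the toserverstr()/toclientstr() conversions above: the intermediate files feed PostgreSQL's binary COPY protocol, and in Python 3 struct's "s" format packs bytes, not str, so every string must be encoded before packing. A minimal standalone sketch of the idea (Python 3 only, assuming UTF-8 server_encoding as the patch's own comment states; not part of the patch):

	import struct

	def toserverstr(s):
		# assumption: UTF-8 server_encoding, matching the patch's comment
		return bytes(s, "UTF_8")

	evsel_id = 0
	name = toserverstr("unknown")
	n = len(name)
	# one PGCOPY binary tuple: field count (h), then (length, data) per field
	value = struct.pack("!hiqi" + str(n) + "s", 2, 8, evsel_id, n, name)
	assert value[:2] == b"\x00\x02"  # two fields: id and name
	# passing the str "unknown" instead of bytes raises struct.error on Python 3

The same reasoning explains the b"PGCOPY\n\377\r\n\0" header, the b"\377\377" trailer, and the switch to "wb+" file mode: the intermediate files are byte streams, never text.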
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index eb63e6c7107f..3b71902a5a21 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -10,6 +10,8 @@
 # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 # more details.
 
+from __future__ import print_function
+
 import os
 import sys
 import struct
@@ -60,11 +62,17 @@ perf_db_export_mode = True
 perf_db_export_calls = False
 perf_db_export_callchains = False
 
+def printerr(*args, **keyword_args):
+	print(*args, file=sys.stderr, **keyword_args)
+
+def printdate(*args, **kw_args):
+	print(datetime.datetime.today(), *args, sep=' ', **kw_args)
+
 def usage():
-	print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"
-	print >> sys.stderr, "where: columns 'all' or 'branches'"
-	print >> sys.stderr, "       calls 'calls' => create calls and call_paths table"
-	print >> sys.stderr, "       callchains 'callchains' => create call_paths table"
+	printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]");
+	printerr("where: columns 'all' or 'branches'");
+	printerr("       calls 'calls' => create calls and call_paths table");
+	printerr("       callchains 'callchains' => create call_paths table");
 	raise Exception("Too few arguments")
 
 if (len(sys.argv) < 2):
@@ -100,7 +108,7 @@ def do_query_(q):
 		return
 	raise Exception("Query failed: " + q.lastError().text())
 
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database ...")
 
 db_exists = False
 try:
@@ -378,7 +386,7 @@ if perf_db_export_calls:
 	call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
 
 def trace_begin():
-	print datetime.datetime.today(), "Writing records..."
+	printdate("Writing records...")
 	do_query(query, 'BEGIN TRANSACTION')
 	# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
 	evsel_table(0, "unknown")
@@ -397,14 +405,14 @@ unhandled_count = 0
 
 def trace_end():
 	do_query(query, 'END TRANSACTION')
 
-	print datetime.datetime.today(), "Adding indexes"
+	printdate("Adding indexes")
 	if perf_db_export_calls:
 		do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
 		do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
 
 	if (unhandled_count):
-		print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
-	print datetime.datetime.today(), "Done"
+		printdate("Warning: ", unhandled_count, " unhandled events")
+	printdate("Done")
 
 def trace_unhandled(event_name, context, event_fields_dict):
 	global unhandled_count
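The same print-statement conversion runs through both exporters: with `from __future__ import print_function`, the Python-2-only `print >> sys.stderr, msg` form becomes a function call that both interpreters accept. A standalone sketch of the two helpers, using the names the patch itself introduces:

	from __future__ import print_function  # no-op on Python 3

	import sys
	import datetime

	def printerr(*args, **kw_args):
		print(*args, file=sys.stderr, **kw_args)

	def printdate(*args, **kw_args):
		print(datetime.datetime.today(), *args, sep=' ', **kw_args)

	printdate("Creating database ...")  # prefixes the message with a timestamp
	printerr("Too few arguments")       # goes to stderr on either Python version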
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index afec9479ca7f..74ef92f1d19a 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -88,20 +88,39 @@
 #                     7fab593ea956 48 89 15 3b 13 22 00   movq %rdx, 0x22133b(%rip)
 # 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
 
+from __future__ import print_function
+
 import sys
 import weakref
 import threading
 import string
-import cPickle
+try:
+	# Python2
+	import cPickle as pickle
+	# size of pickled integer big enough for record size
+	glb_nsz = 8
+except ImportError:
+	import pickle
+	glb_nsz = 16
 import re
 import os
 
 from PySide.QtCore import *
 from PySide.QtGui import *
 from PySide.QtSql import *
+pyside_version_1 = True
 from decimal import *
 from ctypes import *
 from multiprocessing import Process, Array, Value, Event
 
+# xrange is range in Python3
+try:
+	xrange
+except NameError:
+	xrange = range
+
+def printerr(*args, **keyword_args):
+	print(*args, file=sys.stderr, **keyword_args)
+
 # Data formatting helpers
 
 def tohex(ip):
@@ -1004,10 +1023,6 @@ class ChildDataItemFinder():
 
 glb_chunk_sz = 10000
 
-# size of pickled integer big enough for record size
-
-glb_nsz = 8
-
 # Background process for SQL data fetcher
 
 class SQLFetcherProcess():
@@ -1066,7 +1081,7 @@ class SQLFetcherProcess():
 			return True
 		if space >= glb_nsz:
 			# Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
-			nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL)
+			nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
 			self.buffer[self.local_head : self.local_head + len(nd)] = nd
 			self.local_head = 0
 		if self.local_tail - self.local_head > sz:
@@ -1084,9 +1099,9 @@ class SQLFetcherProcess():
 			self.wait_event.wait()
 
 	def AddToBuffer(self, obj):
-		d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL)
+		d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
 		n = len(d)
-		nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL)
+		nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
 		sz = n + glb_nsz
 		self.WaitForSpace(sz)
 		pos = self.local_head
@@ -1198,12 +1213,12 @@ class SQLFetcher(QObject):
 		pos = self.local_tail
 		if len(self.buffer) - pos < glb_nsz:
 			pos = 0
-		n = cPickle.loads(self.buffer[pos : pos + glb_nsz])
+		n = pickle.loads(self.buffer[pos : pos + glb_nsz])
 		if n == 0:
 			pos = 0
-			n = cPickle.loads(self.buffer[0 : glb_nsz])
+			n = pickle.loads(self.buffer[0 : glb_nsz])
 		pos += glb_nsz
-		obj = cPickle.loads(self.buffer[pos : pos + n])
+		obj = pickle.loads(self.buffer[pos : pos + n])
 		self.local_tail = pos + n
 		return obj
 
@@ -1512,6 +1527,19 @@ def BranchDataPrep(query):
 			" (" + dsoname(query.value(15)) + ")")
 	return data
 
+def BranchDataPrepWA(query):
+	data = []
+	data.append(query.value(0))
+	# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+	data.append("{:>19}".format(query.value(1)))
+	for i in xrange(2, 8):
+		data.append(query.value(i))
+	data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
+		" (" + dsoname(query.value(11)) + ")" + " -> " +
+		tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
+		" (" + dsoname(query.value(15)) + ")")
+	return data
+
 # Branch data model
 
 class BranchModel(TreeModel):
@@ -1539,7 +1567,11 @@ class BranchModel(TreeModel):
 			" AND evsel_id = " + str(self.event_id) +
 			" ORDER BY samples.id"
 			" LIMIT " + str(glb_chunk_sz))
-		self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample)
+		if pyside_version_1 and sys.version_info[0] == 3:
+			prep = BranchDataPrepWA
+		else:
+			prep = BranchDataPrep
+		self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
 		self.fetcher.done.connect(self.Update)
 		self.fetcher.Fetch(glb_chunk_sz)
 
@@ -2065,14 +2097,6 @@ def IsSelectable(db, table, sql = ""):
 		return False
 	return True
 
-# SQL data preparation
-
-def SQLTableDataPrep(query, count):
-	data = []
-	for i in xrange(count):
-		data.append(query.value(i))
-	return data
-
 # SQL table data model item
 
 class SQLTableItem():
@@ -2096,7 +2120,7 @@ class SQLTableModel(TableModel):
 		self.more = True
 		self.populated = 0
 		self.column_headers = column_headers
-		self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample)
+		self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
 		self.fetcher.done.connect(self.Update)
 		self.fetcher.Fetch(glb_chunk_sz)
 
@@ -2140,6 +2164,12 @@ class SQLTableModel(TableModel):
 	def columnHeader(self, column):
 		return self.column_headers[column]
 
+	def SQLTableDataPrep(self, query, count):
+		data = []
+		for i in xrange(count):
+			data.append(query.value(i))
+		return data
+
 # SQL automatic table data model
 
 class SQLAutoTableModel(SQLTableModel):
@@ -2168,8 +2198,32 @@ class SQLAutoTableModel(SQLTableModel):
 			QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
 			while query.next():
 				column_headers.append(query.value(0))
+		if pyside_version_1 and sys.version_info[0] == 3:
+			if table_name == "samples_view":
+				self.SQLTableDataPrep = self.samples_view_DataPrep
+			if table_name == "samples":
+				self.SQLTableDataPrep = self.samples_DataPrep
 		super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
 
+	def samples_view_DataPrep(self, query, count):
+		data = []
+		data.append(query.value(0))
+		# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+		data.append("{:>19}".format(query.value(1)))
+		for i in xrange(2, count):
+			data.append(query.value(i))
+		return data
+
+	def samples_DataPrep(self, query, count):
+		data = []
+		for i in xrange(9):
+			data.append(query.value(i))
+		# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+		data.append("{:>19}".format(query.value(9)))
+		for i in xrange(10, count):
+			data.append(query.value(i))
+		return data
+
 # Base class for custom ResizeColumnsToContents
 
 class ResizeColumnsToContentsBase(QObject):
@@ -2854,9 +2908,13 @@ class LibXED():
 		ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
 		if not ok:
 			return 0, ""
+		if sys.version_info[0] == 2:
+			result = inst.buffer.value
+		else:
+			result = inst.buffer.value.decode()
 		# Return instruction length and the disassembled instruction text
 		# For now, assume the length is in byte 166
-		return inst.xedd[166], inst.buffer.value
+		return inst.xedd[166], result
 
 def TryOpen(file_name):
 	try:
@@ -2872,9 +2930,14 @@ def Is64Bit(f):
 	header = f.read(7)
 	f.seek(pos)
 	magic = header[0:4]
-	eclass = ord(header[4])
-	encoding = ord(header[5])
-	version = ord(header[6])
+	if sys.version_info[0] == 2:
+		eclass = ord(header[4])
+		encoding = ord(header[5])
+		version = ord(header[6])
+	else:
+		eclass = header[4]
+		encoding = header[5]
+		version = header[6]
 	if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
 		result = True if eclass == 2 else False
 	return result
@@ -2973,7 +3036,7 @@ class DBRef():
 
 def Main():
 	if (len(sys.argv) < 2):
-		print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
+		printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}");
 		raise Exception("Too few arguments")
 
 	dbname = sys.argv[1]
@@ -2986,8 +3049,8 @@ def Main():
 	is_sqlite3 = False
 	try:
-		f = open(dbname)
-		if f.read(15) == "SQLite format 3":
+		f = open(dbname, "rb")
+		if f.read(15) == b'SQLite format 3':
 			is_sqlite3 = True
 		f.close()
 	except:
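Two of the subtler viewer changes, shown in isolation. glb_nsz grows from 8 to 16 because a pickled integer is larger under Python 3's pickle protocols, and the fetcher's ring buffer reads back a fixed glb_nsz-byte slot, relying on pickle.loads() stopping at the STOP opcode; Is64Bit() drops ord() because indexing bytes yields int in Python 3. A sketch of both behaviors (not part of the patch):

	import pickle

	# fixed-size slot for the pickled record length, as in SQLFetcherProcess
	n = 12345
	nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
	slot = nd.ljust(16, b"\x00")    # glb_nsz = 16 leaves headroom on Python 3
	assert pickle.loads(slot) == n  # trailing padding after STOP is ignored

	# indexing bytes: Python 3 gives ints, so Is64Bit() can skip ord()
	header = b"\x7fELF\x02\x01\x01"
	eclass = header[4]              # == 2, i.e. 64-bit ELF class
	assert eclass == 2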