mirror of https://github.com/bluekitchen/btstack.git
tool: consistently use raw strings for regular expressions
Newer versions of Python raise a SyntaxWarning when a string literal contains a backslash that is not part of a valid escape sequence, which happens routinely in regular expressions. To prevent this warning, and the error it is expected to become in a future release, use raw strings for all regular expressions. Even strings without escape sequences are converted for consistency. Some IDEs also apply special syntax highlighting to raw strings, which makes regular expressions easier to read.
commit 73677349c9
parent 5740227a8e
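
For reference, the behaviour described in the commit message can be reproduced in isolation. The sketch below is illustrative only (the pattern is copied from one of the hunks further down); it shows that a plain literal and its raw-string form spell the exact same regular expression, and that only the plain one triggers the warning when the file is byte-compiled:

    import re

    # '\s' and '\*' are not valid Python string escapes. The backslashes are
    # passed through unchanged, so the regex still works, but Python 3.12+
    # prints "SyntaxWarning: invalid escape sequence '\s'" while compiling
    # this literal (Python 3.6-3.11 issue a DeprecationWarning instead).
    pattern_plain = '\s*(/\*).*API_START.*(\*/)'

    # The raw-string form yields the identical pattern without relying on the
    # string parser tolerating unknown escapes.
    pattern_raw = r'\s*(/\*).*API_START.*(\*/)'

    assert pattern_plain == pattern_raw
    assert re.match(pattern_raw, "   /* API_START */") is not None
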
@@ -45,13 +45,13 @@ def main(argv):
 for line in fin:
 if not codeblock:
 fout.write(line)
-if re.match('.*<pre><code>.*',line):
+if re.match(r'.*<pre><code>.*',line):
 codeblock = 1
 continue

 writeCodeBlock(fout,line, references)
 # check if codeblock ended
-if re.match('.*</code></pre>.*',line):
+if re.match(r'.*</code></pre>.*',line):
 codeblock = 0
@@ -88,13 +88,13 @@ def writeAPI(fout, fin, mk_codeidentation):

 for line in fin:
 if state == State.SearchStartAPI:
-parts = re.match('.*API_START.*',line)
+parts = re.match(r'.*API_START.*',line)
 if parts:
 state = State.SearchEndAPI
 continue

 if state == State.SearchEndAPI:
-parts = re.match('.*API_END.*',line)
+parts = re.match(r'.*API_END.*',line)
 if parts:
 state = State.DoneAPI
 continue
@@ -115,13 +115,13 @@ def createIndex(fin, filename, api_filepath, api_title, api_label, githuburl):
 linenr = linenr + 1

 if state == State.SearchStartAPI:
-parts = re.match('.*API_START.*',line)
+parts = re.match(r'.*API_START.*',line)
 if parts:
 state = State.SearchEndAPI
 continue

 if state == State.SearchEndAPI:
-parts = re.match('.*API_END.*',line)
+parts = re.match(r'.*API_END.*',line)
 if parts:
 state = State.DoneAPI
 continue
@@ -129,22 +129,22 @@ def createIndex(fin, filename, api_filepath, api_title, api_label, githuburl):
 if isComment(line):
 continue

-param = re.match(".*@brief.*", line)
+param = re.match(r".*@brief.*", line)
 if param:
 continue
-param = re.match(".*@param.*", line)
+param = re.match(r".*@param.*", line)
 if param:
 continue
-param = re.match(".*@return.*", line)
+param = re.match(r".*@return.*", line)
 if param:
 continue
-param = re.match(".*@result.*", line)
+param = re.match(r".*@result.*", line)
 if param:
 continue
-param = re.match(".*@note.*", line)
+param = re.match(r".*@note.*", line)
 if param:
 continue
-param = re.match(".*return.*", line)
+param = re.match(r".*return.*", line)
 if param:
 continue
@@ -40,7 +40,7 @@ def writeAPI(fout, infile_name):
 with open(infile_name, 'r') as fin:
 for line in fin:
 if state == State.SearchStartAPI:
-parts = re.match('\s*(/\*).*API_START.*(\*/)',line)
+parts = re.match(r'\s*(/\*).*API_START.*(\*/)',line)
 if parts:
 state = State.RemoveEmptyLinesAfterAPIStart
 continue
@@ -51,7 +51,7 @@ def writeAPI(fout, infile_name):
 state = State.SearchEndAPI

 if state == State.SearchEndAPI:
-parts = re.match('\s*(/\*).*API_END.*(\*/)',line)
+parts = re.match(r'\s*(/\*).*API_END.*(\*/)',line)
 if parts:
 state = State.DoneAPI
 return
@@ -58,12 +58,12 @@ def latexText(text, ref_prefix):
 return ""
 brief = text.replace(" in the BTstack manual","")

-refs = re.match('.*(Listing\s+)(\w+).*',brief)
+refs = re.match(r'.*(Listing\s+)(\w+).*',brief)
 if refs:
 brief = brief.replace(refs.group(1), "[code snippet below]")
 brief = brief.replace(refs.group(2), "(#"+ref_prefix+":" + refs.group(2)+")")

-refs = re.match('.*(Section\s+)(\w+).*',brief)
+refs = re.match(r'.*(Section\s+)(\w+).*',brief)
 if refs:
 brief = brief.replace(refs.group(1), "[here]")
 brief = brief.replace(refs.group(2), "(#section:"+refs.group(2)+")")
@@ -72,39 +72,39 @@ def latexText(text, ref_prefix):


 def isEmptyCommentLine(line):
-return re.match('(\s*\*\s*)\n',line)
+return re.match(r'(\s*\*\s*)\n',line)


 def isCommentLine(line):
-return re.match('(\s*\*\s*).*',line)
+return re.match(r'(\s*\*\s*).*',line)


 def isEndOfComment(line):
-return re.match('\s*\*/.*', line)
+return re.match(r'\s*\*/.*', line)


 def isNewItem(line):
-return re.match('(\s*\*\s*\-\s*)(.*)',line)
+return re.match(r'(\s*\*\s*\-\s*)(.*)',line)


 def isTextTag(line):
-return re.match('.*(@text).*', line)
+return re.match(r'.*(@text).*', line)


 def isItemizeTag(line):
-return re.match("(\s+\*\s+)(-\s)(.*)", line)
+return re.match(r"(\s+\*\s+)(-\s)(.*)", line)


 def processTextLine(line, ref_prefix):
 if isTextTag(line):
-text_line_parts = re.match(".*(@text)(.*)", line)
+text_line_parts = re.match(r".*(@text)(.*)", line)
 return " " + latexText(text_line_parts.group(2), ref_prefix)

 if isItemizeTag(line):
-text_line_parts = re.match("(\s*\*\s*\-\s*)(.*)", line)
+text_line_parts = re.match(r"(\s*\*\s*\-\s*)(.*)", line)
 return "\n- " + latexText(text_line_parts.group(2), ref_prefix)

-text_line_parts = re.match("(\s+\*\s+)(.*)", line)
+text_line_parts = re.match(r"(\s+\*\s+)(.*)", line)
 if text_line_parts:
 return " " + latexText(text_line_parts.group(2), ref_prefix)
 return ""
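
The helpers changed in this hunk classify the lines of a C block comment for the documentation generator. A minimal sketch of how the (now raw-string) patterns behave; the sample lines are made up for illustration:

    import re

    samples = [
        " * \n",                          # blank comment line
        " * @text This is body text.\n",
        " * - first itemized entry\n",
        " */\n",                          # end of the block comment
    ]

    for line in samples:
        is_empty    = re.match(r'(\s*\*\s*)\n', line) is not None         # isEmptyCommentLine
        is_text_tag = re.match(r'.*(@text).*', line) is not None          # isTextTag
        is_item     = re.match(r"(\s+\*\s+)(-\s)(.*)", line) is not None  # isItemizeTag
        is_end      = re.match(r'\s*\*/.*', line) is not None             # isEndOfComment
        print(repr(line), is_empty, is_text_tag, is_item, is_end)

Only one of the four predicates fires for each sample line, which is what lets processTextLine above dispatch on the line type.
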
@@ -113,7 +113,7 @@ def getExampleTitle(example_path):
 example_title = ''
 with open(example_path, 'r') as fin:
 for line in fin:
-parts = re.match('.*(EXAMPLE_START)\((.*)\):\s*(.*)(\*/)?\n',line)
+parts = re.match(r'.*(EXAMPLE_START)\((.*)\):\s*(.*)(\*/)?\n',line)
 if parts:
 example_title = parts.group(3).replace("_","\_")
 continue
@@ -154,7 +154,7 @@ def writeListings(aout, infile_name, ref_prefix):
 with open(infile_name, 'r') as fin:
 for line in fin:
 if state == State.SearchExampleStart:
-parts = re.match('.*(EXAMPLE_START)\((.*)\):\s*(.*)(\*/)?\n',line)
+parts = re.match(r'.*(EXAMPLE_START)\((.*)\):\s*(.*)(\*/)?\n',line)
 if parts:
 lable = parts.group(2).replace("_","")
 title = latexText(parts.group(2), ref_prefix)
@@ -164,13 +164,13 @@ def writeListings(aout, infile_name, ref_prefix):
 continue

 # detect @section
-section_parts = re.match('.*(@section)\s*(.*)(:?\s*.?)\*?/?\n',line)
+section_parts = re.match(r'.*(@section)\s*(.*)(:?\s*.?)\*?/?\n',line)
 if section_parts:
 aout.write("\n" + example_subsection.replace("SECTION_TITLE", section_parts.group(2)))
 continue

 # detect @subsection
-subsection_parts = re.match('.*(@section)\s*(.*)(:?\s*.?)\*?/?\n',line)
+subsection_parts = re.match(r'.*(@section)\s*(.*)(:?\s*.?)\*?/?\n',line)
 if section_parts:
 subsubsection = example_subsection.replace("SECTION_TITLE", section_parts.group(2)).replace('section', 'subsection')
 aout.write("\n" + subsubsection)
@@ -215,7 +215,7 @@ def writeListings(aout, infile_name, ref_prefix):
 #continue

 if state == State.SearchListingStart:
-parts = re.match('.*(LISTING_START)\((.*)\):\s*(.*)(\s+\*/).*',line)
+parts = re.match(r'.*(LISTING_START)\((.*)\):\s*(.*)(\s+\*/).*',line)

 if parts:
 lst_lable = parts.group(2).replace("_","")
@@ -227,9 +227,9 @@ def writeListings(aout, infile_name, ref_prefix):
 continue

 if state == State.SearchListingEnd:
-parts_end = re.match('.*(LISTING_END).*',line)
-parts_pause = re.match('.*(LISTING_PAUSE).*',line)
-end_comment_parts = re.match('.*(\*/)\s*\n', line);
+parts_end = re.match(r'.*(LISTING_END).*',line)
+parts_pause = re.match(r'.*(LISTING_PAUSE).*',line)
+end_comment_parts = re.match(r'.*(\*/)\s*\n', line);

 if parts_end:
 aout.write(code_in_listing)
@@ -248,12 +248,12 @@ def writeListings(aout, infile_name, ref_prefix):
 continue

 if state == State.SearchListingResume:
-parts = re.match('.*(LISTING_RESUME).*',line)
+parts = re.match(r'.*(LISTING_RESUME).*',line)
 if parts:
 state = State.SearchListingEnd
 continue

-parts = re.match('.*(EXAMPLE_END).*',line)
+parts = re.match(r'.*(EXAMPLE_END).*',line)
 if parts:
 if state != State.SearchListingStart:
 print("Formating error detected")
|
@@ -60,9 +60,9 @@ with open (infile, 'rb') as fin:
 packet = packet.replace("\r","\\r")
 packet = packet.replace("\"","\\\"")

-parts = re.match('HFP_RX(.*)',packet)
+parts = re.match(r'HFP_RX(.*)',packet)
 if not parts:
-parts = re.match('HFP_TX(.*)',packet)
+parts = re.match(r'HFP_TX(.*)',packet)

 cmd = 0
 if parts:
@@ -75,7 +75,7 @@ with open (infile, 'rb') as fin:
 separator = ",\n"

 else:
-parts = re.match('USER:\'(.*)\'.*',packet)
+parts = re.match(r"USER:'(.*)'.*",packet)
 if parts:
 cmd = 'USER:'+parts.groups()[0]
 print (separator+spaces+"\""+cmd+"\"",)
@@ -654,19 +654,19 @@ with open (infile, 'rb') as fin:
 elif type == 0xfc:
 # LOG
 log = packet.decode("utf-8")
-parts = re.match('mesh-iv-index: (.*)', log)
+parts = re.match(r'mesh-iv-index: (.*)', log)
 if parts and len(parts.groups()) == 1:
 mesh_set_iv_index(int(parts.groups()[0], 16))
 continue
-parts = re.match('mesh-devkey: (.*)', log)
+parts = re.match(r'mesh-devkey: (.*)', log)
 if parts and len(parts.groups()) == 1:
 mesh_set_device_key(bytes.fromhex(parts.groups()[0]))
 continue
-parts = re.match('mesh-appkey-(.*): (.*)', log)
+parts = re.match(r'mesh-appkey-(.*): (.*)', log)
 if parts and len(parts.groups()) == 2:
 mesh_add_application_key(int(parts.groups()[0], 16), bytes.fromhex(parts.groups()[1]))
 continue
-parts = re.match('mesh-netkey-(.*): (.*)', log)
+parts = re.match(r'mesh-netkey-(.*): (.*)', log)
 if parts and len(parts.groups()) == 2:
 mesh_add_netkey(int(parts.groups()[0], 16), bytes.fromhex(parts.groups()[1]))
 continue
@@ -73,7 +73,7 @@ def scrape_page(fout, url):
 # drop dashes otherwise
 tag = tag.replace('-',' ')
 # collect multiple spaces
-tag = re.sub('\s+', ' ', tag).strip()
+tag = re.sub(r'\s+', ' ', tag).strip()
 # replace space with under score
 tag =tag.replace(' ', '_')
 fout.write("#define BLUETOOTH_DATA_TYPE_%-50s %s // %s\n" % (tag, data_type_value, data_type_name))
@@ -38,8 +38,8 @@ defines = []

 # Convert CamelCase to snake_case from http://stackoverflow.com/a/1176023
 def camel_to_underscore(name):
-s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
-return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).upper()
+s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
+return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s1).upper()

 def create_pretty_define(name):
 name = name.lstrip()
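
As a usage note for the two-pass substitution above: the first re.sub inserts an underscore before each capitalized word, the second separates a lower-case letter or digit from a following capital, and the result is upper-cased. A small sketch (the input names are arbitrary examples, not taken from the generated headers):

    import re

    def camel_to_underscore(name):
        # insert '_' before a capitalized word, then before any lower/digit-to-upper boundary
        s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s1).upper()

    print(camel_to_underscore('ServiceChanged'))    # SERVICE_CHANGED
    print(camel_to_underscore('HIDControlPoint'))   # HID_CONTROL_POINT
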
@@ -168,7 +168,7 @@ def read_defines(infile):
 defines = dict()
 with open (infile, 'rt') as fin:
 for line in fin:
-parts = re.match('#define\\s+(\\w+)\\s+(\\w+)',line)
+parts = re.match(r'#define\s+(\w+)\s+(\w+)',line)
 if parts and len(parts.groups()) == 2:
 (key, value) = parts.groups()
 defines[key] = int(value, 16)
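
In this hunk the old pattern already avoided the warning by doubling its backslashes, so converting it to a raw string is purely a consistency change: the escaped and the raw literal denote the same pattern string. A quick illustrative check with a made-up input line:

    import re

    # Both literals describe the identical regular expression.
    assert '#define\\s+(\\w+)\\s+(\\w+)' == r'#define\s+(\w+)\s+(\w+)'

    line = '#define EXAMPLE_DEFINE 0x180F'
    parts = re.match(r'#define\s+(\w+)\s+(\w+)', line)
    if parts:
        key, value = parts.groups()
        print(key, int(value, 16))   # EXAMPLE_DEFINE 6159
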
@@ -187,12 +187,12 @@ def twoByteLEFor(value):
 return [ (value & 0xff), (value >> 8)]

 def is_128bit_uuid(text):
-if re.match("[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}", text):
+if re.match(r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}", text):
 return True
 return False

 def parseUUID128(uuid):
-parts = re.match("([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})", uuid)
+parts = re.match(r"([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})", uuid)
 uuid_bytes = []
 for i in range(8, 0, -1):
 uuid_bytes = uuid_bytes + twoByteLEFor(int(parts.group(i),16))
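
For reference, twoByteLEFor and parseUUID128 together turn a textual 128-bit UUID into a list of bytes in little-endian order, walking the eight 16-bit groups from the end of the string. A hedged sketch of the intent (the trailing return is assumed, since the hunk ends before it, and the sample UUID is arbitrary):

    import re

    def twoByteLEFor(value):
        # low byte first, then high byte
        return [(value & 0xff), (value >> 8)]

    def parseUUID128(uuid):
        parts = re.match(r"([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-"
                         r"([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})", uuid)
        uuid_bytes = []
        # emit the eight 16-bit groups back to front, each as little-endian
        for i in range(8, 0, -1):
            uuid_bytes = uuid_bytes + twoByteLEFor(int(parts.group(i), 16))
        return uuid_bytes   # assumed; not visible in the hunk

    # 0x180F embedded in the Bluetooth base UUID, reversed into LE byte order
    print(parseUUID128("0000180F-0000-1000-8000-00805F9B34FB"))
    # [251, 52, 155, 95, 128, 0, 0, 128, 0, 16, 0, 0, 15, 24, 0, 0]
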
@@ -876,10 +876,10 @@ def parseLines(fname_in, fin, fout):

 if line.startswith("#import"):
 imported_file = ''
-parts = re.match('#import\\s+<(.*)>\\w*',line)
+parts = re.match(r'#import\s+<(.*)>\w*',line)
 if parts and len(parts.groups()) == 1:
 imported_file = parts.groups()[0]
-parts = re.match('#import\\s+"(.*)"\\w*',line)
+parts = re.match(r'#import\s+"(.*)"\w*',line)
 if parts and len(parts.groups()) == 1:
 imported_file = parts.groups()[0]
 if len(imported_file) == 0:
@@ -119,7 +119,7 @@ with open (outfile, 'wb') as fout:
 # skip empty lines
 if len(line) == 0:
 continue
-parts = re.match('\[(.*)\] (.*)', line)
+parts = re.match(r'\[(.*)\] (.*)', line)
 if parts and len(parts.groups()) == 2:
 (timestamp, line) = parts.groups()
 rest = chop(line,'CMD => ')
@@ -40,15 +40,15 @@ with open(cstat_file, 'rt') as fin:
 location = location.replace(project_prefix, '').replace('\\','/')
 parts = location.split(':')
 (path, lineno) = parts
-match = re.match("The operands `(.+)' and `(.+)' have essential type categories (.*) and (.*), which do not match.", msg)
+match = re.match(r"The operands `(.+)' and `(.+)' have essential type categories (.*) and (.*), which do not match.", msg)
 # fix if operand is signed literal and cstat complains about signednesss
 if match:
 (op1, op2, t1, t2) = match.groups()
-if re.match("[(0x)0-9]+", op1) and t1.startswith('signed'):
+if re.match(r"[(0x)0-9]+", op1) and t1.startswith('signed'):
 fix(path, int(lineno), op1)
 fixed += 1
 continue
-if re.match("[(0x)0-9]+", op2) and t2.startswith('signed'):
+if re.match(r"[(0x)0-9]+", op2) and t2.startswith('signed'):
 fix(path, int(lineno), op2)
 fixed += 1
 continue
@@ -50,7 +50,7 @@ with open(cstat_file, 'rt') as fin:
 if len(chunks) != 4: continue
 (msg, rule, severity, location) = chunks
 if not rule.startswith('MISRAC2012-Rule-12.1'): continue
-parts = re.match(".*Suggest parentheses.*`(.*)'", msg)
+parts = re.match(r".*Suggest parentheses.*`(.*)'", msg)
 total += 1
 expression = parts.groups()[0]
 expression = remove_whitespace(expression)
@@ -78,7 +78,7 @@ def read_and_update_configuration(full_path, line_ending):
 configuration += ("%s%s" % (lines_to_replace[line], line_ending))
 continue

-parts = re.match("#define\\s*(.*)", line)
+parts = re.match(r"#define\s*(.*)", line)
 if parts:
 block.append(parts[1])
 else:
@@ -37,7 +37,7 @@ def read_and_update_configuration(full_path, line_ending, root):
 for unstripped_line in fin:
 line = unstripped_line.strip()

-parts = re.match('(//\s*btstack_config.h\s)(\w*)', line)
+parts = re.match(r'(//\s*btstack_config.h\s)(\w*)', line)
 if parts and len(parts.groups()) == 2:
 configuration += config_header_template.replace("PORT_NAME", port_name).replace("\r\n", line_ending)
 else:
@@ -3,8 +3,8 @@ import os
 import re
 import sys

-copyrightTitle = ".*(Copyright).*(BlueKitchen GmbH)"
-copyrightEndString = "Please inquire about"
+copyrightTitle = r".*(Copyright).*(BlueKitchen GmbH)"
+copyrightEndString = r"Please inquire about"

 findAndReplace = {
 "MATTHIAS" : "BLUEKITCHEN",
@@ -42,7 +42,7 @@ def updateCopyright(dir_name, file_name):

 if state == State.SearchEndCopyright:
 # search end of Copyright
-parts = re.match('\s*(\*\/)\s*',line)
+parts = re.match(r'\s*(\*\/)\s*',line)
 if parts:
 state = State.CopyrightEnd
 else:
@@ -5,7 +5,7 @@ import re
 btstack_root = os.path.abspath(os.path.dirname(sys.argv[0]) + '/..')

 filetag = '#define BTSTACK_FILE__ "%s"\n'
-filetag_re = '#define BTSTACK_FILE__ \"(.*)\"'
+filetag_re = r'#define BTSTACK_FILE__ "(.*)"'

 ignoreFolders = ["3rd-party", "pic32-harmony", "msp430", "cpputest", "test", "msp-exp430f5438-cc2564b", "msp430f5229lp-cc2564b", "ez430-rf2560", "ios", "chipset/cc256x", "docs", "mtk", "port"]
 ignoreFiles = ["ant_cmds.h", "rijndael.c", "btstack_config.h", "btstack_version.h", "profile.h", "bluetoothdrv.h",
@@ -32,13 +32,13 @@ def update_filename_tag(dir_name, file_name, has_tag):
 for line in fin:
 if state == State.SearchStartComment:
 fout.write(line)
-parts = re.match('\s*(/\*).*(\*/)',line)
+parts = re.match(r'\s*(/\*).*(\*/)',line)
 if parts:
 if len(parts.groups()) == 2:
 # one line comment
 continue

-parts = re.match('\s*(/\*).*',line)
+parts = re.match(r'\s*(/\*).*',line)
 if parts:
 # beginning of comment
 state = State.SearchCopyrighter
@@ -46,7 +46,7 @@ def update_filename_tag(dir_name, file_name, has_tag):

 if state == State.SearchCopyrighter:
 fout.write(line)
-parts = re.match('.*(\*/)',line)
+parts = re.match(r'.*(\*/)',line)
 if parts:
 # end of comment
 state = State.SearchStartComment
@@ -17,7 +17,7 @@ def twoByteLEFor(value):
 return [ (value & 0xff), (value >> 8)]

 def parseUUID128(uuid):
-parts = re.match("([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})", uuid)
+parts = re.match(r"([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})-([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})([0-9A-Fa-f]{4})", uuid)
 uuid_bytes = []
 for i in range(8, 0, -1):
 uuid_bytes = uuid_bytes + twoByteLEFor(int(parts.group(i),16))