spatch

Lenient universal diff patcher
git clone https://git.sinitax.com/sinitax/spatch

commit 596b0c08a7326a28c11d6c66e60e591fb642b0c1
parent ff265082fdc8cdb521d8bcf2d3b55fb6ea9797a6
Author: Louis Burda <quent.burda@gmail.com>
Date:   Thu, 22 Jun 2023 18:02:54 +0200

Add MIT License and rename patch.py to spatch

Diffstat:
 A LICENSE  |  21 +++++++++++++++++++++
 D patch.py | 139 -------------------------------------------------------------------------------
 A spatch   | 137 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 158 insertions(+), 139 deletions(-)

diff --git a/LICENSE b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Louis Burda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/patch.py b/patch.py
@@ -1,139 +0,0 @@
-#!/usr/bin/env python3
-
-import sys, os, re
-from os import path
-
-file_header = """\
---- ([^\\n\\t]*)(\\t[^\n]*)?
-\+\+\+ ([^\\n\\t]*)(\\t[^\\n]*)?
-"""
-file_header_pattern = re.compile(file_header)
-
-chunk_header = "@@ -([0-9]*),([0-9]*) \+([0-9]*),([0-9]*) @@(.*)\n";
-chunk_header_pattern = re.compile(chunk_header)
-
-def patch_file(src_filename, dst_filename, content):
-    prev_header_match = chunk_header_pattern.search(content)
-    if prev_header_match == None:
-        print("[CHUNK] No chunks found, skipping.")
-        return
-
-    src_content = open(src_filename, "r").read()
-
-    chunks = list()
-    next_header_match = True # for do_while loop
-    while next_header_match:
-        next_header_match = chunk_header_pattern\
-            .search(content, prev_header_match.span()[1])
-        chunks.append(prev_header_match)
-        prev_header_match = next_header_match
-
-    for i,c in enumerate(chunks):
-        src_line = c.group(1)
-        src_count = c.group(2)
-        dst_line = c.group(3)
-        dst_count = c.group(4)
-        comment = c.group(5).strip()
-
-        if comment != "":
-            print("[CHUNK] Applying chunk with comment: {}".format(comment))
-        else:
-            print("[CHUNK] Applying chunk at line: {}".format(src_line))
-
-        start_pos = c.span()[1]
-        if i != len(chunks) - 1:
-            end_pos = chunks[i + 1].span()[0]
-        else:
-            end_pos = len(content)
-
-        chunk_content = content[start_pos:end_pos].split("\n")
-        valid_lines = 0
-        for l in chunk_content:
-            if len(l) == 0 or l[0] not in (' ', '+', '-', '\\'):
-                break
-            valid_lines += 1
-
-        chunk_content = chunk_content[:valid_lines]
-
-        src_lines = "\n".join([l[1:] for l in chunk_content if l[0] in (' ', '-')])
-        dst_lines = "\n".join([l[1:] for l in chunk_content if l[0] in (' ', '+')])
-
-        if src_lines == 0 and dst_lines == 0:
-            print("[ERROR] Chunk has no valid lines")
-            sys.exit(1)
-
-        src_nl = dst_nl = True
-        for i,l in enumerate(chunk_content):
-            if i != 0 and l == '\ No newline at end of file':
-                if chunk_content[i-1][0] == '+':
-                    src_nl = False
-                elif chunk_content[i-1][0] == '-':
-                    dst_nl = False
-        src_lines += "\n" if src_nl and len(src_lines) > 0 else ""
-        dst_lines += "\n" if dst_nl and len(dst_lines) > 0 else ""
-
-        try:
-            replace_start = src_content.index(src_lines)
-            src_content = src_content[:replace_start] \
-                + dst_lines + src_content[replace_start+len(src_lines):]
-        except Exception as e:
-            print("[ERROR] Failed to find corresponding lines for chunk, exiting..")
-            sys.exit(1)
-
-    open(dst_filename, "w+").write(src_content)
-
-def main():
-    if len(sys.argv) < 2:
-        print("Supply the path of a unified diff file as argument")
-        return 1
-    elif len(sys.argv) == 3:
-        targetdir = sys.argv[2]
-    else:
-        targetdir = None
-
-    noprompt = (os.getenv("NOPROMPT") != None)
-    diff_file = sys.argv[1]
-    content = open(diff_file).read()
-
-    prev_header_match = file_header_pattern.search(content)
-    if prev_header_match == None:
-        print("[ERROR] Not a unified diff file!")
-        return 1
-    header_matches = list()
-    next_header_match = True # for do_while loop
-    while (next_header_match):
-        next_header_match = file_header_pattern.search(content, prev_header_match.span()[1])
-        header_matches.append(prev_header_match)
-        prev_header_match = next_header_match
-
-    print("[GLOBAL] Processing diff file '{}'".format(diff_file))
-    print("[GLOBAL] Found {} file patch headers..".format(len(header_matches)))
-
-    for i in range(len(header_matches)):
-        if len(header_matches[i].groups()) == 4:
-            src_file = header_matches[i].group(1)
-            dst_file = header_matches[i].group(3)
-        else:
-            src_file = header_matches[i].group(1)
-            dst_file = header_matches[i].group(2)
-
-        if targetdir:
-            if src_file[0] != "/":
-                src_file = targetdir + "/" + src_file.split("/",1)[1]
-            if dst_file[0] != "/":
-                dst_file = targetdir + "/" + dst_file.split("/",1)[1]
-
-        print("[PATCH] Applying patch from {} to {}".format(src_file, dst_file))
-
-        startpos = header_matches[i].span()[1]
-        if i == len(header_matches) - 1:
-            endpos = len(content)
-        else:
-            endpos = header_matches[i + 1].span()[0]
-
-        patch_file(src_file, dst_file, content[startpos:endpos])
-
-    return 0
-
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/spatch b/spatch
@@ -0,0 +1,137 @@
+#!/usr/bin/env python3
+
+import sys, re
+
+file_header = """\
+--- ([^\\n\\t]*)(\\t[^\n]*)?
+\+\+\+ ([^\\n\\t]*)(\\t[^\\n]*)?
+"""
+file_header_pattern = re.compile(file_header)
+
+chunk_header = "@@ -([0-9]*),([0-9]*) \+([0-9]*),([0-9]*) @@(.*)\n";
+chunk_header_pattern = re.compile(chunk_header)
+
+def patch_file(src_filename, dst_filename, content):
+    prev_header_match = chunk_header_pattern.search(content)
+    if prev_header_match == None:
+        print("[CHUNK] No chunks found, skipping.")
+        return
+
+    src_content = open(src_filename, "r").read()
+
+    chunks = list()
+    next_header_match = True # for do_while loop
+    while next_header_match:
+        next_header_match = chunk_header_pattern\
+            .search(content, prev_header_match.span()[1])
+        chunks.append(prev_header_match)
+        prev_header_match = next_header_match
+
+    for i,c in enumerate(chunks):
+        src_line = c.group(1)
+        src_count = c.group(2)
+        dst_line = c.group(3)
+        dst_count = c.group(4)
+        comment = c.group(5).strip()
+
+        if comment != "":
+            print("[CHUNK] Applying chunk with comment: {}".format(comment))
+        else:
+            print("[CHUNK] Applying chunk at line: {}".format(src_line))
+
+        start_pos = c.span()[1]
+        if i != len(chunks) - 1:
+            end_pos = chunks[i + 1].span()[0]
+        else:
+            end_pos = len(content)
+
+        chunk_content = content[start_pos:end_pos].split("\n")
+        valid_lines = 0
+        for l in chunk_content:
+            if len(l) == 0 or l[0] not in (' ', '+', '-', '\\'):
+                break
+            valid_lines += 1
+
+        chunk_content = chunk_content[:valid_lines]
+
+        src_lines = "\n".join([l[1:] for l in chunk_content if l[0] in (' ', '-')])
+        dst_lines = "\n".join([l[1:] for l in chunk_content if l[0] in (' ', '+')])
+
+        if src_lines == 0 and dst_lines == 0:
+            print("[ERROR] Chunk has no valid lines")
+            sys.exit(1)
+
+        src_nl = dst_nl = True
+        for i,l in enumerate(chunk_content):
+            if i != 0 and l == '\\ No newline at end of file':
+                if chunk_content[i-1][0] == '+':
+                    src_nl = False
+                elif chunk_content[i-1][0] == '-':
+                    dst_nl = False
+        src_lines += "\n" if src_nl and len(src_lines) > 0 else ""
+        dst_lines += "\n" if dst_nl and len(dst_lines) > 0 else ""
+
+        try:
+            replace_start = src_content.index(src_lines)
+            src_content = src_content[:replace_start] \
+                + dst_lines + src_content[replace_start+len(src_lines):]
+        except Exception as e:
+            print("[ERROR] Failed to find corresponding lines for chunk, exiting..")
+            sys.exit(1)
+
+    open(dst_filename, "w+").write(src_content)
+
+def main():
+    if len(sys.argv) < 2:
+        print("Supply the path of a unified diff file as argument")
+        return 1
+    elif len(sys.argv) == 3:
+        targetdir = sys.argv[2]
+    else:
+        targetdir = None
+
+    diff_file = sys.argv[1]
+    content = open(diff_file).read()
+
+    prev_header_match = file_header_pattern.search(content)
+    if prev_header_match == None:
+        print("[ERROR] Not a unified diff file!")
+        return 1
+    header_matches = list()
+    next_header_match = True # for do_while loop
+    while next_header_match:
+        next_header_match = file_header_pattern.search(content, prev_header_match.span()[1])
+        header_matches.append(prev_header_match)
+        prev_header_match = next_header_match
+
+    print("[GLOBAL] Processing diff file '{}'".format(diff_file))
+    print("[GLOBAL] Found {} file patch headers..".format(len(header_matches)))
+
+    for i in range(len(header_matches)):
+        if len(header_matches[i].groups()) == 4:
+            src_file = header_matches[i].group(1)
+            dst_file = header_matches[i].group(3)
+        else:
+            src_file = header_matches[i].group(1)
+            dst_file = header_matches[i].group(2)
+
+        if targetdir:
+            if src_file[0] != "/":
+                src_file = targetdir + "/" + src_file.split("/",1)[1]
+            if dst_file[0] != "/":
+                dst_file = targetdir + "/" + dst_file.split("/",1)[1]
+
+        print("[PATCH] Applying patch from {} to {}".format(src_file, dst_file))
+
+        startpos = header_matches[i].span()[1]
+        if i == len(header_matches) - 1:
+            endpos = len(content)
+        else:
+            endpos = header_matches[i + 1].span()[0]
+
+        patch_file(src_file, dst_file, content[startpos:endpos])
+
+    return 0
+
+if __name__ == "__main__":
+    sys.exit(main())