first commit
This commit is contained in:
149
pyutils/adb_tools.py
Normal file
149
pyutils/adb_tools.py
Normal file
@@ -0,0 +1,149 @@
|
||||
import os
import platform
import requests
import zipfile
import shutil
import subprocess


# define
# Download URL template for Google's platform-tools archive; the {} slot
# is filled with the OS name (windows/linux/darwin).
url = 'https://googledownloads.cn/android/repository/platform-tools-latest-{}.zip'

# Detect the operating system type (e.g. 'windows', 'linux', 'darwin').
os_type = platform.system().lower()
url = url.format(os_type).lower()

# Directory layout: <project>/platform-tools/<os_type>/adb[.exe]
current_path = os.path.dirname(os.path.abspath(__file__))
project_path = os.path.dirname(current_path)
platform_tools_path = os.path.join(project_path, 'platform-tools')
platform_tools_system_path = os.path.join(platform_tools_path, os_type)
# Path of the adb executable (no extension here; '.exe' is appended for
# the Windows existence check in check_platform_tools).
adb_exe_path = os.path.join(platform_tools_system_path, "adb")

# platform_tools_path = os.path.dirname(adb_exe_path)

# Local filename used when downloading the platform-tools archive.
platform_tools_zip_file_name = f'platform-tools-latest-{os_type.lower()}.zip'
||||
def download(url, path):
    """Download *url* to the local file *path*, creating parent directories.

    Streams the response body to disk in chunks. The original passed
    stream=True but then read ``r.content``, buffering the whole archive
    in memory; it also had no timeout and saved HTTP error pages as if
    they were the archive.
    """
    parent = os.path.dirname(path)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    r = requests.get(url, stream=True, timeout=60)
    r.raise_for_status()  # fail loudly instead of writing an error page to disk
    with open(path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=65536):
            f.write(chunk)
|
||||
|
||||
|
||||
def move_and_remove(src, dest):
    """Move every entry of directory *src* into *dest*, then delete *src*.

    *dest* is created if it does not exist. When *src* is missing or is
    not a directory, a message is printed and nothing is moved.
    """
    if not os.path.exists(dest):
        os.makedirs(dest)
    if not (os.path.exists(src) and os.path.isdir(src)):
        print("Source directory does not exist.")
        return
    for entry in os.listdir(src):
        entry_path = os.path.join(src, entry)
        if os.path.isdir(entry_path):
            # Directories get an explicit destination path.
            shutil.move(entry_path, os.path.join(dest, entry))
        else:
            # Plain files keep their name inside *dest*.
            shutil.move(entry_path, dest)
    # src is empty now; remove the leftover directory itself.
    os.rmdir(src)
|
||||
|
||||
|
||||
def check_platform_tools():
    """Ensure the adb executable for this OS is present locally.

    When missing, download the platform-tools archive, unzip it, and
    relocate the extracted directory under platform-tools/<os_type>.
    """
    if os_type in ('linux', 'darwin'):
        if os.path.exists(adb_exe_path):
            return
    elif os_type == 'windows':
        # Windows binaries carry an .exe suffix.
        if os.path.exists(f'{adb_exe_path}.exe'):
            return
    zip_path = os.path.join(platform_tools_path, platform_tools_zip_file_name)
    download(url, zip_path)
    unzip_file(zip_path, platform_tools_path)
    # The archive always unpacks into a 'platform-tools' directory;
    # move it into the per-OS subdirectory.
    move_and_remove(os.path.join(platform_tools_path, 'platform-tools'),
                    os.path.join(platform_tools_path, os_type.lower()))
|
||||
|
||||
|
||||
# Extract a zip archive.
def unzip_file(zip_path, extract_path):
    """Extract all members of the zip archive *zip_path* into *extract_path*."""
    with zipfile.ZipFile(zip_path) as archive:
        archive.extractall(extract_path)
|
||||
|
||||
|
||||
def pull_file(source, target):
    """Copy *source* from the device to local path *target* via `adb pull`."""
    device_path = source
    if device_path.startswith('/'):
        # Device-side paths are POSIX; normalise any stray backslashes.
        device_path = device_path.replace('\\', '/')
    return run_adb_command([device_path, target], op='pull')
|
||||
|
||||
|
||||
def push_file(source, target):
    """Copy local file *source* onto the device at *target* via `adb push`."""
    args = [source, target]
    return run_adb_command(args, op='push')
|
||||
|
||||
|
||||
def run_adb_command(command, op='shell', print_log=False):
    """Run an adb subcommand and return its output bytes.

    command: a single argument string, or a list of arguments.
    op: adb sub-command ('shell', 'pull', 'push', ...).
    print_log: when True, decode and print the non-empty output lines.

    Returns stdout on success and stderr on failure. The original built
    this ``response`` value but mistakenly returned the raw stdout even
    when the command failed, silently discarding the error text.
    """
    args = [adb_exe_path, op]
    if isinstance(command, list):
        args.extend(command)
    else:
        args.append(command)

    process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()

    # Report stderr on failure, stdout otherwise.
    response = err if process.returncode != 0 else out
    if response != b'' and print_log:
        # adb on Windows emits '\r\r\n' line terminators.
        lines = [line for line in response.decode('utf-8').split('\r\r\n') if line]
        print(lines)
    return response
|
||||
|
||||
|
||||
def get_sub_dirs(path):
    """List sub-directory names under *path* on the device.

    The device-side shell pipeline keeps only directory entries and
    drops symlinks and dot entries. Returns a list of names.
    """
    if path.startswith('/'):
        path = path.replace('\\', '/')
    # Single adb invocation: the original executed the identical command
    # twice and discarded the first result.
    cmd = "ls -lha {} |grep -ve '\\.' |grep -v ' -> '|grep ^d|awk {}".format(path, "'{print $9}'")
    out = run_adb_command(cmd, op='shell')
    file_lists = out.decode('utf-8').split('\r\r\n')
    return [file_name for file_name in file_lists if file_name]
|
||||
def get_sub_files(path):
    """List plain-file names under *path* on the device.

    Directory and symlink entries (and the 'total' summary line) are
    filtered out by the device-side shell pipeline.
    """
    if path.startswith('/'):
        path = path.replace('\\', '/')
    shell_cmd = "ls -lha {} |grep -v ^d |grep -v ^l|grep -v 'total '|awk {}".format(path, "'{print $9}'")
    raw = run_adb_command(shell_cmd, op='shell')
    names = raw.decode('utf-8').split('\r\r\n')
    return [name for name in names if name]
|
||||
|
||||
def get_partitions_from_system():
    """Read the MTD partition table from the attached device.

    Returns a list of (name, start_offset, size_bytes) tuples where
    name is '<index>.<mtd_name>' and start offsets are cumulative.
    """
    raw_sizes = run_adb_command("cat /proc/partitions |grep -v major|grep mtd |awk '{print $3}'")
    size_lists = [s for s in raw_sizes.decode('utf-8').split('\r\r\n') if s]

    # Partition names come from /proc/mtd; strip the surrounding quotes.
    raw_names = run_adb_command("cat /proc/mtd |grep mtd |awk '{print $4}'")
    raw_names = raw_names.replace(b'"', b'')
    name_lists = [n for n in raw_names.decode('utf-8').split('\r\r\n') if n]

    partitions = []
    offset = 0
    for idx, mtd_name in enumerate(name_lists):
        # /proc/partitions reports sizes in 1 KiB blocks.
        size_bytes = int(size_lists[idx]) * 1024
        partitions.append((f'{idx}.{mtd_name}', offset, size_bytes))
        offset += size_bytes
    return partitions
|
||||
|
||||
|
||||
|
||||
check_platform_tools()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
pass
|
||||
53
pyutils/check_and_delete_files.py
Normal file
53
pyutils/check_and_delete_files.py
Normal file
@@ -0,0 +1,53 @@
|
||||
import os
|
||||
|
||||
from pyutils import configs
|
||||
|
||||
|
||||
def delete_files(dir_path, files_to_delete):
    """Delete each file of *files_to_delete* found inside *dir_path*.

    Missing files are silently skipped; each deletion is logged.
    """
    # Banner typo fixed: the original printed a literal 'n' ("n\n\n..."),
    # unlike the matching banner in check_files.
    print(f"\n\n\n#######################\nstart to delete in directory:\n{dir_path}")

    for file_name in files_to_delete:
        file_path = os.path.join(dir_path, file_name)
        if os.path.exists(file_path):
            os.remove(file_path)
            print(f"Deleted {file_name}")
|
||||
|
||||
|
||||
def check_files(dir_path, files_to_check, check_extra=True):
    """Print files from *files_to_check* that are absent in *dir_path*,
    and optionally also files present there that were not expected."""
    print(f"\n\n\n#######################\nstart to scan directory:\n{dir_path}")
    present = os.listdir(dir_path)

    missing = [name for name in files_to_check if name not in present]
    if missing:
        print(f"The following files are missing:\n {', '.join(missing)}")

    if not check_extra:
        return
    unexpected = [name for name in present if name not in files_to_check]
    if unexpected:
        print(
            f"The following extra files found in the directory:\n {', '.join(unexpected)}"
        )
|
||||
|
||||
|
||||
def main(default_config_path, rootfs_directory):
    """For each managed directory of *rootfs_directory*, report deviations
    from the configured baseline file list, then delete the configured
    removal list."""
    script_path = os.path.abspath(__file__)
    current_directory = os.path.dirname(script_path)

    dir_list = ['bin', 'sbin']
    # dir_list = ['bin', 'sbin', 'usr/bin', 'usr/sbin']

    for dir_name in dir_list:
        dir_path = os.path.join(rootfs_directory, dir_name)
        # Each config section is named after its directory and carries a
        # deletion list plus a baseline ("base") file list.
        bin_to_delete = configs.get_item_list(default_config_path, dir_name, 'delete_file_list')  # ["terminal_mgmt", "tc_tbf.sh"]
        bin_to_check = configs.get_item_list(default_config_path, dir_name, 'base_file_list')
        check_files(dir_path, bin_to_check)
        delete_files(dir_path, bin_to_delete)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # NOTE(review): main() requires (default_config_path, rootfs_directory);
    # calling it with no arguments raises TypeError — supply the paths or
    # add sys.argv parsing.
    main()
|
||||
14
pyutils/configs.py
Normal file
14
pyutils/configs.py
Normal file
@@ -0,0 +1,14 @@
|
||||
import configparser
|
||||
|
||||
|
||||
def get_item(config_file_path, section, option):
    """Return the raw string value of *option* in *section* of an INI file."""
    parser = configparser.ConfigParser()
    parser.read(config_file_path)
    value = parser.get(section, option)
    return value
|
||||
|
||||
def get_item_list(config_file_path, section, option):
    """Return the comma-separated value of *option* as a list.

    Empty entries (e.g. from a trailing comma) are dropped; an empty or
    missing value yields an empty list.
    """
    raw = get_item(config_file_path, section, option)
    if not raw:
        return []
    return [part for part in raw.split(',') if part != '']
|
||||
|
||||
174
pyutils/default_parameter_default
Normal file
174
pyutils/default_parameter_default
Normal file
@@ -0,0 +1,174 @@
|
||||
appKeyMobile=
|
||||
auto_connect_when_limited=no
|
||||
cdrom_state=1
|
||||
|
||||
cstm_webui_ttl=1
|
||||
cstm_webui_imei=1
|
||||
cstm_webui_simswitch=1
|
||||
cstm_webui_unlocksim=1
|
||||
cstm_webui_dns=1
|
||||
cstm_webui_bandselect=1
|
||||
cstm_webui_restartplan=1
|
||||
|
||||
|
||||
terminal_mgmt_enable=0
|
||||
band_select_enable=1
|
||||
dns_manual_func_enable=1
|
||||
ussd_enable=1
|
||||
user_ctrl_wifi_enable=1
|
||||
wifiEnabled=1
|
||||
vpn_enable=1
|
||||
telnetd_enable=n
|
||||
dns_manual_enable=1
|
||||
dosenable=0
|
||||
enable_lpa=0
|
||||
debug_mode=
|
||||
|
||||
fota_allowRoamingUpdate=0
|
||||
fota_dl_pkg_size=0
|
||||
fota_need_user_confirm_download=1
|
||||
fota_need_user_confirm_update=1
|
||||
fota_pkg_downloaded=0
|
||||
fota_pkg_total_size=0
|
||||
fota_test_mode=0
|
||||
fota_update_flag=
|
||||
fota_updateIntervalDay=0
|
||||
fota_updateMode=1
|
||||
fota_upgrade_result=
|
||||
fota_upgrade_result_internal=
|
||||
fota_version_delta_id=
|
||||
fota_version_delta_url=
|
||||
fota_version_file_size=
|
||||
fota_version_force_install=0
|
||||
fota_version_md5sum=
|
||||
fota_version_name=
|
||||
fota_chk_url_gs=
|
||||
fota_chk_url_zx=
|
||||
fota_device_type=
|
||||
fota_dl_url_gs=
|
||||
fota_dl_url_zx=
|
||||
fota_dm_vendor=rs
|
||||
fota_models=
|
||||
fota_network_type=
|
||||
fota_oem=
|
||||
fota_platform=
|
||||
fota_product_id=
|
||||
fota_product_secret=
|
||||
fota_reg_url_gs=
|
||||
fota_reg_url_zx=
|
||||
fota_report_dlr_url_gs=
|
||||
fota_report_dlr_url_zx=
|
||||
fota_report_sales_url_gs=
|
||||
fota_report_sales_url_zx=
|
||||
fota_report_upgr_url_gs=
|
||||
fota_report_upgr_url_zx=
|
||||
fota_token_gs=
|
||||
fota_token_rs=
|
||||
fota_token_zx=
|
||||
fota_update_space_threshold=500
|
||||
|
||||
idle_time="600"
|
||||
is_traffic_aline_on=no
|
||||
is_traffic_alining=no
|
||||
is_traffic_limit_on=no
|
||||
keep_online_when_limited=no
|
||||
lan_domain_Enabled=1
|
||||
need_support_pb=yes
|
||||
need_support_sms=yes
|
||||
|
||||
|
||||
usb_acm_num_amt=0
|
||||
usb_acm_num_debug=0
|
||||
usb_acm_num_factory=0
|
||||
usb_acm_num_user=0
|
||||
usb_devices_amt=serial,diag,adb
|
||||
usb_devices_debug=diag,adb,serial
|
||||
usb_devices_factory=serial,diag,adb
|
||||
usb_devices_user=
|
||||
usb_lun_type_cdrom=
|
||||
usb_lun_type_debug=
|
||||
usb_lun_type_user=
|
||||
usb_serial_num_amt=2
|
||||
usb_serial_num_debug=2
|
||||
usb_serial_num_factory=1
|
||||
usb_serial_num_user=0
|
||||
|
||||
ACL_mode=0
|
||||
AccessControlList0=
|
||||
AccessPolicy0=0
|
||||
Channel=0
|
||||
DDNS=
|
||||
DDNS_Enable=0
|
||||
DDNSAccount=
|
||||
DDNSPassword=
|
||||
DDNSProvider=
|
||||
DMZEnable=0
|
||||
DMZIPAddress=
|
||||
DefaultFirewallPolicy=0
|
||||
DefaultKeyID=0
|
||||
DtimPeriod=1
|
||||
HOST_FIELD=Host: www.baidu1.com
|
||||
HT_GI=1
|
||||
LanEnable=1
|
||||
MAX_Access_num=100
|
||||
MAX_Access_num_bak=100
|
||||
RemoteManagement=0
|
||||
TM_SERVER_NAME=www.baidu1.com
|
||||
TM_SERVER_PORT=65535
|
||||
TURNOFF_CHR_NUM=
|
||||
URI_FIELD=/uploadinfo_v
|
||||
admin_Password=admin
|
||||
|
||||
lpa_appid=
|
||||
lpa_appsecret=
|
||||
lpa_bpp_iccid=
|
||||
lpa_dl_retry=2
|
||||
lpa_trigger_event_url=/esim_uni_plus_server/api/event
|
||||
lpa_trigger_host=www.baidu1.com
|
||||
lpa_trigger_port=65535
|
||||
lpa_trigger_updata_url=/esim_uni_plus_server/api/updata
|
||||
lpa_updata_retry=1
|
||||
monitor_apps=
|
||||
monitor_period=0
|
||||
nv_save_interval=300
|
||||
os_url=http://www.baidu1.com
|
||||
root_Password=factoryAdmin
|
||||
safecare_chatsite=
|
||||
safecare_contimeinterval=
|
||||
safecare_contimestart1=
|
||||
safecare_contimestart2=
|
||||
safecare_contimestart3=
|
||||
safecare_contimestop1=
|
||||
safecare_contimestop2=
|
||||
safecare_contimestop3=
|
||||
safecare_enbale=0
|
||||
safecare_hostname=www.baidu1.com
|
||||
safecare_mobilenumber=
|
||||
safecare_mobsite=http://www.baidu1.com
|
||||
safecare_platno=
|
||||
safecare_registed_iccid=
|
||||
safecare_registed_imei=
|
||||
safecare_version=
|
||||
sim_auto_switch_enable=0
|
||||
sim_lock_status=unlock
|
||||
tc_downlink=
|
||||
tc_enable=0
|
||||
tc_uplink=
|
||||
time_limited=
|
||||
tr069_acs_auth_enable=0
|
||||
tr069_acs_password=
|
||||
tr069_acs_url=
|
||||
tr069_acs_username=
|
||||
tr069_cpe_auth_enable=0
|
||||
tr069_cpe_password=
|
||||
tr069_cpe_username=
|
||||
tr069_enable=0
|
||||
tr069_func_enable=0
|
||||
tr069_httpd_port=7547
|
||||
tr069_inform_enable=1
|
||||
tr069_inform_interval=600
|
||||
tr069_private_name=
|
||||
traffic_mgmt_enable=0
|
||||
uart_control=0
|
||||
uart_ctstrs_enable=
|
||||
vsim_bin_path=/mnt/userdata/vSim.bin.bak
|
||||
58
pyutils/nv_compare.py
Normal file
58
pyutils/nv_compare.py
Normal file
@@ -0,0 +1,58 @@
|
||||
def parse_config(file_path):
    """Parse a key=value file into a dict.

    The first '=' on a line splits key from value; lines without '='
    (blank lines, comments) are ignored.
    """
    config = {}
    with open(file_path, 'r', encoding='utf-8') as fh:
        for raw_line in fh:
            if '=' not in raw_line:
                continue
            key, _, value = raw_line.strip().partition('=')
            config[key] = value
    return config
|
||||
|
||||
|
||||
def compare_configs(file1, file2):
    """Compare two key=value config files.

    Returns a dict with three maps:
      'unique_in_file1'  : keys only in file1 -> value
      'unique_in_file2'  : keys only in file2 -> value
      'different_values' : shared keys -> (value_in_file1, value_in_file2)
    """
    config1 = parse_config(file1)
    config2 = parse_config(file2)

    result = {
        'unique_in_file1': {},
        'unique_in_file2': {},
        'different_values': {},
    }

    for key, value in config1.items():
        if key not in config2:
            result['unique_in_file1'][key] = value
        elif config2[key] != value:
            result['different_values'][key] = (value, config2[key])

    for key, value in config2.items():
        if key not in config1:
            result['unique_in_file2'][key] = value

    return result
|
||||
|
||||
def main(config_file1, config_file2):
    """Print a human-readable diff of two key=value config files."""
    diff = compare_configs(config_file1, config_file2)

    # Sort by underscore-separated components so related keys group together.
    def sort_key(name):
        return (name.split('_')[0], *name.split('_')[1:])

    print("Keys unique to first file:")
    for key in sorted(diff['unique_in_file1'].keys(), key=sort_key):
        print(f"{key.replace(':', '=')}: {diff['unique_in_file1'][key]}")

    print("\nKeys unique to second file:")
    for key in sorted(diff['unique_in_file2'].keys(), key=sort_key):
        print(f"{key.replace(':', '=')}: {diff['unique_in_file2'][key]}")

    print("\nKeys with different values:")
    for key in sorted(diff['different_values'].keys(), key=sort_key):
        old, new = diff['different_values'][key]
        print(f"{key.replace(':', '=')}: {old} vs {new}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    config_file1 = r'D:\wifi\随身wifi助手\TQ\f231zc_v1.0_jd\info\nv1.txt'
    config_file2 = r'D:\wifi\随身wifi助手\TQ\f231zc_v1.0_jd\pull\etc_ro\default\default_parameter_user'
    # Bug fix: main() takes both file paths; it was called with no arguments,
    # which raised TypeError despite the paths being defined just above.
    main(config_file1, config_file2)
|
||||
59
pyutils/nv_replace.py
Normal file
59
pyutils/nv_replace.py
Normal file
@@ -0,0 +1,59 @@
|
||||
import sys
|
||||
import os
|
||||
|
||||
def parse_config(file_path):
    """Read a key=value file into a dict.

    Splits each line on the first '='; lines without '=' are ignored.
    """
    entries = {}
    with open(file_path, 'r') as fh:
        for raw in fh:
            if '=' not in raw:
                continue
            key, _, value = raw.strip().partition('=')
            entries[key] = value
    return entries
|
||||
|
||||
def update_config_with_default(default_config, target_file):
    """Overwrite values in *target_file* with those from *default_config*.

    Keys present in the default but absent from the target are only
    reported, not added. If anything changed, the target is first backed
    up to '<target>.bak' and then rewritten as key=value lines.
    """
    default_data = parse_config(default_config)
    target_data = parse_config(target_file)

    updated = False
    for key, value in default_data.items():
        if key not in target_data:
            print(f"Key '{key}' does not exist in target file")
        elif target_data[key] != value:
            print(f"Key '{key}' mismatch: Source='{target_data[key]}', New='{value}'")
            target_data[key] = value
            updated = True

    if not updated:
        return

    # Back up the original file before rewriting it.
    bak_file = target_file + '.bak'
    with open(target_file, 'r') as original:
        original_text = original.read()
    with open(bak_file, 'w') as backup:
        backup.write(original_text)

    # Write the merged configuration back out.
    with open(target_file, 'w') as out:
        for key, value in target_data.items():
            out.write(f"{key}={value}\n")
|
||||
|
||||
|
||||
def main(target_rootfs_path):
    """Apply default_parameter_default on top of each default_parameter_*
    file under <rootfs>/etc_ro/default."""
    current_directory = os.path.dirname(os.path.abspath(__file__))
    # Loop invariants hoisted: the original re-joined the (already absolute)
    # default path on every iteration, only working because os.path.join
    # discards its first argument when the second is absolute.
    default_config_file = os.path.join(current_directory, "default_parameter_default")
    target_dir = os.path.join(target_rootfs_path, 'etc_ro/default')

    target_files = ["default_parameter_ro", "default_parameter_sys", "default_parameter_user"]
    for target_name in target_files:
        update_config_with_default(default_config_file, os.path.join(target_dir, target_name))
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Resolve the rootfs directory relative to this script's location.
    script_path = os.path.abspath(__file__)
    current_directory = os.path.dirname(script_path)
    target_rootfs_path = os.path.join(current_directory, 'squashfs-root')
    main(target_rootfs_path)
|
||||
|
||||
33
pyutils/nv_sort.py
Normal file
33
pyutils/nv_sort.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import sys
|
||||
|
||||
def parse_config(file_path):
    """Load a key=value file into a dict, skipping lines without '='.

    Only the first '=' on a line splits key from value.
    """
    parsed = {}
    with open(file_path, 'r') as handle:
        for text in handle:
            if '=' not in text:
                # Blank/comment lines carry no key-value pair.
                continue
            key, _, value = text.strip().partition('=')
            parsed[key] = value
    return parsed
|
||||
|
||||
def print_sorted_config(file_path):
    """Print the key=value pairs of *file_path*, keys sorted by their
    underscore-separated components."""
    try:
        data = parse_config(file_path)
        # Group related keys: compare prefix first, then the remaining parts.
        ordered = sorted(data.keys(), key=lambda name: (name.split('_')[0], *name.split('_')[1:]))
        for key in ordered:
            print(f"{key}={data[key]}")
    except FileNotFoundError:
        print("指定的文件路径不存在。请提供正确的文件路径。")
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Usage: python nv_sort.py <config_file>
    if len(sys.argv) != 2:
        print("请提供文件路径作为参数。")
    else:
        file_path = sys.argv[1]
        print_sorted_config(file_path)

# print_sorted_config('D:\wifi\随身wifi助手\TQ\mf761w_v1.1_changcheng_suning\pull\etc_ro\default\default_parameter_user')
|
||||
|
||||
130
pyutils/squashfs_extract.py
Normal file
130
pyutils/squashfs_extract.py
Normal file
@@ -0,0 +1,130 @@
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
import struct
|
||||
|
||||
|
||||
def find_squashfs(filename):
    """Scan *filename* for embedded squashfs v4 (xz-compressed) images.

    Matches a 64-byte superblock signature where '**' marks a wildcard
    byte, then reads the 64-bit filesystem size at superblock offset 40.
    Returns a list of {'offset': int, 'size': int} dicts, one per match.
    """
    # Squashfs filesystem, little endian, version 4.0, compression:xz
    magic = '68 73 71 73 ** ** 00 00 ** ** ** ** 00 00 ** 00 ** 00 00 00 ** 00 ** 00 ** ** ** 00 ** 00 00 00 ** ** ** ** 00 00 00 00 ** ** ** 00 00 00 00 00 ** ** ** 00 00 00 00 00 FF FF FF FF FF FF FF FF'
    # Split the pattern on wildcards: a list of fixed-byte runs, where an
    # empty element marks a single wildcard byte.
    hex_bytes_list = magic.replace(' ','').split('**')
    first_bytes = bytes.fromhex(hex_bytes_list[0])  # 'hsqs' magic prefix
    squashfs_files_list = []
    with open(filename, 'rb') as f:
        data = f.read()

        index = data.find(first_bytes)
        start_index = index
        file_size = 0
        while index != -1:  # a candidate magic prefix was found
            find_flag = True
            f.seek(start_index)
            print(f"在偏移位置 {start_index} 找到了字节序列")
            # Verify the remaining fixed runs of the signature in order.
            for line in hex_bytes_list:
                line_byte_size = int(len(line)/2)
                if line_byte_size == 0 :
                    # Empty run = one wildcard byte: skip it.
                    index = int(index + 1)
                    continue
                f.seek(index)
                if f.read(int(line_byte_size)) == bytes.fromhex(line):
                    # NOTE(review): advances by run length + 1, i.e. it also
                    # consumes the wildcard byte following the run — confirm
                    # this matches the pattern layout exactly.
                    index = int(index + line_byte_size + 1)
                    continue
                else:
                    find_flag = False
                    break

            if find_flag:
                # Bytes 40..47 of the superblock hold the filesystem size (<Q).
                f.seek(start_index + 40)
                raw_data = f.read(8)
                file_size = struct.unpack('<Q', raw_data)[0]
                file_info = {}
                file_info.__setitem__('offset',start_index)
                file_info.__setitem__('size',file_size)
                squashfs_files_list.append(file_info)
            # Search for the next occurrence after the current candidate.
            index = data.find(first_bytes , start_index + len( first_bytes))
            start_index = index
    return squashfs_files_list
|
||||
|
||||
# Extract a byte range into its own file and report the resulting size.
def extract_squashfs_file(squashfs_image, offset, size, output_filename):
    """Copy *size* bytes at *offset* of *squashfs_image* into a sibling file.

    The new file is named '<stem>_<output_filename><ext>' and written next
    to the source image.
    """
    parent_directory = os.path.dirname(squashfs_image)
    original_file_name = os.path.basename(squashfs_image)
    stem, extension = os.path.splitext(original_file_name)

    file_path = os.path.join(parent_directory, f'{stem}_{output_filename}{extension}')

    with open(squashfs_image, 'rb') as src:
        src.seek(offset)
        payload = src.read(size)
    with open(file_path, 'wb') as dst:
        dst.write(payload)
    print("文件",file_path,"提取完成,文件大小为", size, "字节")
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Default image path (overridden by argv below).
    image_file = 'D:\wifi\随身wifi助手\TQ\yingteng_MZ801_V1.2\mtd\mtd4'
    # sys.argv[1] = 'output'
    # sys.argv[2] = image_file

    script_path = os.path.realpath(__file__)
    print("当前脚本路径: ", script_path)

    script_dir, script_name = os.path.split(script_path)
    print("当前脚本所在目录: ", script_dir)
    print("当前脚本文件名: ", script_name)

    # Modes: 'output' dumps embedded squashfs images; 'input' injects a
    # replacement squashfs into the image.
    if len(sys.argv) < 3:
        print('Usage:')
        print(f' python3 {script_name} output mtd4.bin')
        print(f' python3 {script_name} input mtd4.bin example.squashfs')
        exit(1)
    else:
        image_file = sys.argv[2]

    if sys.argv[1] == 'output':
        # Extraction mode: dump every embedded squashfs found in the image,
        # naming each dump by its hex byte range.
        squashfs_files_list = find_squashfs(image_file)
        if len(squashfs_files_list) > 0:
            if len(squashfs_files_list) > 1:
                print(f'发现 {len(squashfs_files_list)} 个镜像文件')
            for file_info in squashfs_files_list:
                extract_squashfs_file(image_file, file_info.get('offset'),file_info.get('size'),f"{hex(file_info.get('offset')).replace('0x', '')}-{hex(file_info.get('offset') + file_info.get('size')).replace('0x', '')}")
    elif sys.argv[1] == 'input':
        # Injection mode: overwrite one embedded squashfs with a new image.
        if len(sys.argv) < 4:
            print(f'必须最少3个参数:eg: python3 {script_name} input mtd4.bin example.squashfs')
            exit(1)
        input_image = sys.argv[3]
        squashfs_files_list = find_squashfs(image_file)
        if len(squashfs_files_list) > 0:
            replaced_image_file_info = squashfs_files_list[0]
            print(f'发现 {len(squashfs_files_list)} 个镜像文件')
        if len(squashfs_files_list) > 0:
            # With multiple embedded images, an explicit index is required.
            if len(squashfs_files_list) > 1 and len(sys.argv) < 5:
                print(f'发现 {len(squashfs_files_list)} 个镜像文件,必须4个参数才能打包替换')
                print(f'eg: python3 {script_name} input mtd4.bin example.squashfs 0')
                print(f'其中0 表示替换第一个镜像文件,大小不建议超过原始文件大小')
                exit(1)
            replaced_image_file_index = 0
            if len(sys.argv) >= 5:
                replaced_image_file_index = int(sys.argv[4])
            replaced_image_file_info = squashfs_files_list[replaced_image_file_index]
            original_image_offset = replaced_image_file_info.get('offset')
            original_image_size = replaced_image_file_info.get('size')

            input_image_size = os.path.getsize(input_image)
            if input_image_size > original_image_size:
                # Warning only — the write below still proceeds.
                print(f'导入文件大小超过原始文件大小,可能导致文件结构破坏,建议新镜像文件≤原镜像文件')
            # Work on a '.new' copy so the original image is preserved.
            shutil.copy2(image_file, image_file + ".new")
            with open(image_file + ".new", 'r+b') as file_a:
                # NOTE(review): this full read is never used afterwards.
                image_file_content = file_a.read()
                file_a.seek(original_image_offset)
                # NOTE(review): prepared erase pattern, but never written.
                empty_bytes = bytes([0xFF] * original_image_size)

                with open(input_image, 'rb') as file_b:
                    input_image_file_content = file_b.read()
                    file_a.seek(original_image_offset)
                    file_a.write(input_image_file_content)
|
||||
|
||||
451
pyutils/zxic_firmware_tools.py
Normal file
451
pyutils/zxic_firmware_tools.py
Normal file
@@ -0,0 +1,451 @@
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import struct
|
||||
|
||||
|
||||
### Parameter definitions

# Name of the current script file.
script_name = os.path.basename(__file__)


# Output directory name (created next to the firmware image).
output_path_name = 'output'

# File that stores a backup of the firmware's partition table.
package_json_file_name = "packages.json"

# Name of the merged (re-assembled) firmware image.
merged_firmware_name = "full.new.bin"

# Magic header (hex) of a zxic flash-programmer firmware file.
zxic_firmware_magic_header = '00005a045a583735323156316c1e0000'
# Magic header ('hsqs') of a squashfs image file.
squashfs_magic_header = '68737173'
|
||||
|
||||
|
||||
# Verify a file's magic header.
def check_magic_header(file_path, size, target):
    """Return True if the first *size* bytes of *file_path*, rendered as a
    lowercase hex string, equal *target*."""
    with open(file_path, "rb") as handle:
        head = handle.read(size)
    return head.hex() == target
|
||||
|
||||
# Identify a zxic programmer firmware file by its magic header.
def is_zxic_fireware_file(file_path):
    """True when *file_path* starts with the 16-byte zxic firmware magic."""
    return check_magic_header(file_path, 16, zxic_firmware_magic_header)
|
||||
|
||||
# Identify a squashfs image by its magic header.
def is_squashfs_file(file_path):
    """True when *file_path* starts with the 4-byte squashfs magic ('hsqs')."""
    return check_magic_header(file_path, 4, squashfs_magic_header)
|
||||
|
||||
# A file is treated as an mtd4 partition dump when it is a squashfs image
# whose on-disk size exceeds the size recorded in the superblock (i.e. the
# image is padded out to the partition size).
def is_mtd4_squashfs_file(file_path):
    """Return True if *file_path* looks like an mtd4 partition dump.

    Raises ValueError when the file is squashfs but smaller than its
    recorded filesystem size.
    """
    if not is_squashfs_file(file_path):
        print(f"{file_path} is not a squashfs/mtd4 file !")
        return False
    file_size = os.path.getsize(file_path)
    with open(file_path, "rb") as handle:
        handle.seek(40)  # superblock offset 40: 64-bit filesystem size
        suqashfs_size = struct.unpack('<Q', handle.read(8))[0]
    if file_size > suqashfs_size:
        return True
    if file_size == suqashfs_size:
        print(f"{file_path} is a squashfs file not a mtd4 file!")
        return False
    raise ValueError(f"{file_path} is a squashfs file but it's file size error!")
|
||||
|
||||
|
||||
def get_file_size_in_kb(file_path):
    """Print and return the size of *file_path* in KiB.

    Returns None when the file does not exist (a message is printed).
    Fix: despite its 'get_' name, the original never returned the
    computed size; returning it is backward-compatible since callers
    previously always received None.
    """
    try:
        # File size in bytes.
        size_in_bytes = os.path.getsize(file_path)
    except FileNotFoundError:
        print("File not found")
        return None
    # Convert bytes to KiB.
    size_in_kb = size_in_bytes / 1024
    print(f"The file size is {size_in_kb} KB")
    return size_in_kb
|
||||
|
||||
# Read the partition table embedded in a programmer firmware / zloader image.
def get_partions_info_from_firmware(firmware_file_path):
    """Parse the NAND partition table stored at offset 0x2020.

    Each 40-byte record is laid out as: name[16] + type[16] + 4 bytes
    (purpose unconfirmed) + size (<I). Parsing stops at the first record
    whose name begins with two NUL bytes or whose type is not 'nand'.

    Returns a list of ('<index>.<name>', cumulative_offset, size) tuples.
    """
    partitions = []
    with open(firmware_file_path, "rb") as file:
        file.seek(8224)  # 0x2020: start of the partition record array
        mtd_offsets = 0  # running byte offset of the next partition
        i = 0
        while True:
            data = file.read(40)
            if data[:2] == b"\x00\x00":
                break  # an empty name terminates the table
            name = data[:16].rstrip(b"\x00").decode("utf-8")
            ftype = data[16:32].rstrip(b"\x00").decode("utf-8")
            size = struct.unpack("<I", data[36:])[0]
            if ftype != "nand":
                break
            print(f"mtd 分区名: {name}, 类型: {ftype}, 大小(Bytes): {size}")
            partitions.append((f"{i}.{name}", mtd_offsets, size))  # record this partition
            i += 1
            mtd_offsets += size
    return partitions
|
||||
|
||||
# Read the partition table of a programmer firmware and split the image
# into one file per mtd partition.
def split_mtds_by_partitions(firmware_file_path, output_path):
    """Write each firmware partition to <output_path>/<name> and save the
    partition layout to packages.json for later re-assembly."""
    partitions = get_partions_info_from_firmware(firmware_file_path)
    with open(firmware_file_path, "rb") as firmware:
        # Emit one file per partition.
        for i, (name, offset, size) in enumerate(partitions):
            firmware.seek(offset)
            chunk = firmware.read(size)
            with open(os.path.join(output_path, name), "wb") as part_out:
                part_out.write(chunk)
            print(f"分区 {i} {name} 输出到 {name}")
    # Persist the partition layout so merge_firmware can re-assemble it.
    package_json_file = os.path.join(output_path, package_json_file_name)
    with open(package_json_file, "w") as json_file:
        json.dump(partitions, json_file)
    print(f'分区结构表输出到: {package_json_file}')
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Split a firmware image; supports zxic 8 MB and 16 MB images.
def split_firmware(firmware_file_path):
    """Validate *firmware_file_path* and split it into per-partition files.

    The rootfs filesystem type is inferred from the image size
    (8 MB -> squashfs, 16 MB -> jffs2). Partition files plus a
    packages.json layout backup are written to an 'output' directory
    next to the firmware.

    Returns the output directory path, or None when the file is missing
    or has an unexpected size.

    Cleanup: removed an unused ``with open(...)`` handle that was held
    open for the whole body, and a large block of dead commented-out
    code duplicated in split_mtds_by_partitions.
    """
    if not os.path.exists(firmware_file_path):
        print(f"{firmware_file_path} 文件不存在!")
        return

    output_path = os.path.join(os.path.dirname(firmware_file_path), output_path_name)
    if not os.path.exists(output_path):
        os.mkdir(output_path)

    file_size = os.path.getsize(firmware_file_path)
    # Infer the rootfs filesystem from the total image size.
    if file_size == 8 * 1024 * 1024:
        mtd4_file_system = "squashfs"
    elif file_size == 16 * 1024 * 1024:
        mtd4_file_system = "jffs2"
    else:
        print(f"{firmware_file_path} 似乎并不是一个 zxic 的编程器固件,文件大小必须是 8MB/16MB!")
        return

    if is_zxic_fireware_file(firmware_file_path):
        print(f"{firmware_file_path} 固件的rootfs分区是 {mtd4_file_system} 文件系统, 固件大小 size={file_size} bytes")
        # It is a zxic firmware: split into per-partition files.
        split_mtds_by_partitions(firmware_file_path, output_path)

    return output_path
|
||||
|
||||
# Re-assemble mtd partition files into a programmer firmware image.
def merge_firmware(mtds_directory):
    """Concatenate the partition files listed in packages.json (inside
    <mtds_directory>/output) into one firmware image."""
    if len(mtds_directory) < 2:
        print(f"{mtds_directory} 路径错误!")
        return
    mtds_directory = os.path.join(mtds_directory, output_path_name)

    package_json_file = os.path.join(mtds_directory, package_json_file_name)
    if not os.path.exists(package_json_file):
        print(f'目录 {mtds_directory} 未找到 {package_json_file_name}')
        return

    merged_firmware_file = os.path.join(mtds_directory, merged_firmware_name)

    with open(package_json_file, "r") as json_file:
        partitions = json.load(json_file)
    # Append each partition's bytes in table order.
    full_firmware = bytearray()
    for name, offset, size in partitions:
        with open(os.path.join(mtds_directory, name), "rb") as partition_file:
            print(f'合并 mtd 分区 {name} 到 {merged_firmware_file}...')
            full_firmware += partition_file.read()
    with open(merged_firmware_file, "wb") as full_file:
        full_file.write(full_firmware)
    print(f"合并固件到 {merged_firmware_file} 成功!")
|
||||
|
||||
|
||||
# Search a programmer firmware or mtd partition for squashfs image files.
def find_squashfs(filename):
    """Scan *filename* for embedded squashfs v4 (xz-compressed) images.

    Matches a 64-byte superblock signature where '**' marks a wildcard
    byte, then reads the 64-bit filesystem size at superblock offset 40.
    Returns a list of {'offset': int, 'size': int} dicts, one per match.
    """
    # Squashfs filesystem, little endian, version 4.0, compression:xz
    magic = '68 73 71 73 ** ** 00 00 ** ** ** ** 00 00 ** 00 ** 00 00 00 ** 00 ** 00 ** ** ** 00 ** 00 00 00 ** ** ** ** 00 00 00 00 ** ** ** 00 00 00 00 00 ** ** ** 00 00 00 00 00 FF FF FF FF FF FF FF FF'
    # Split the pattern on wildcards: a list of fixed-byte runs, where an
    # empty element marks a single wildcard byte.
    hex_bytes_list = magic.replace(' ','').split('**')
    first_bytes = bytes.fromhex(hex_bytes_list[0])  # 'hsqs' magic prefix
    squashfs_files_list = []
    with open(filename, 'rb') as f:
        data = f.read()

        index = data.find(first_bytes)
        start_index = index
        file_size = 0
        while index != -1:  # a candidate magic prefix was found
            find_flag = True
            f.seek(start_index)
            print(f"在偏移位置 {start_index} 找到了字节序列")
            # Verify the remaining fixed runs of the signature in order.
            for line in hex_bytes_list:
                line_byte_size = int(len(line)/2)
                if line_byte_size == 0 :
                    # Empty run = one wildcard byte: skip it.
                    index = int(index + 1)
                    continue
                f.seek(index)
                if f.read(int(line_byte_size)) == bytes.fromhex(line):
                    # NOTE(review): advances by run length + 1, i.e. it also
                    # consumes the wildcard byte following the run — confirm
                    # this matches the pattern layout exactly.
                    index = int(index + line_byte_size + 1)
                    continue
                else:
                    find_flag = False
                    break

            if find_flag:
                # Bytes 40..47 of the superblock hold the filesystem size (<Q).
                f.seek(start_index + 40)
                raw_data = f.read(8)
                file_size = struct.unpack('<Q', raw_data)[0]
                file_info = {}
                file_info.__setitem__('offset',start_index)
                file_info.__setitem__('size',file_size)
                squashfs_files_list.append(file_info)
            # Search for the next occurrence after the current candidate.
            index = data.find(first_bytes , start_index + len( first_bytes))
            start_index = index
    return squashfs_files_list
|
||||
|
||||
# Extract one squashfs image region to its own file and report its size.
def extract_squashfs_file(squashfs_image, offset, size, output_filename):
    """Copy *size* bytes starting at *offset* from *squashfs_image* to a file.

    The output file is named '<stem>_<output_filename><ext>' and placed in the
    output directory next to the source image (unless the source already lives
    inside that directory). The directory is created on demand.
    """
    parent_directory = os.path.dirname(squashfs_image)
    original_file_name = os.path.basename(squashfs_image)
    file_name_without_extension = os.path.splitext(original_file_name)[0]
    file_extension = os.path.splitext(original_file_name)[1]

    new_file = f'{file_name_without_extension}_{output_filename}{file_extension}'
    output_path = parent_directory
    # Avoid nesting output directories when the image already sits in one.
    if os.path.basename(output_path) != output_path_name:
        output_path = os.path.join(parent_directory, output_path_name)
    # Bug fix: the original never created the output directory, so the
    # open(..., 'wb') below failed with FileNotFoundError on a fresh run.
    os.makedirs(output_path, exist_ok=True)
    file_path = os.path.join(output_path, new_file)

    with open(squashfs_image, 'rb') as f_in:
        f_in.seek(offset)
        data = f_in.read(size)
        with open(file_path, 'wb') as f_out:
            f_out.write(data)
    print("文件",file_path,"提取完成,文件大小为", size, "字节")
|
||||
|
||||
|
||||
# Extract squashfs image(s) from a programmer firmware or an mtd4 partition.
def unpack_firmware(file_path):
    """Locate every embedded squashfs image in *file_path* and extract each.

    Each extract is named by its hex byte range (start-end) inside the
    container file.
    """
    if len(file_path) < 2:
        print(f"{file_path} path invalid!")
        return
    # Report the recognized container type. (Bug fix: the original also set an
    # `extract_flag` local here that was never read — dead code, removed.)
    if is_mtd4_squashfs_file(file_path):
        print(f'{file_path} 识别为`squashfs mtd4`分区....')
    elif is_zxic_fireware_file(file_path):
        print(f'{file_path} 识别为`zxic firmware`编程器固件....')

    squashfs_files_list = find_squashfs(file_path)
    if len(squashfs_files_list) > 0:
        if len(squashfs_files_list) > 1:
            print(f'发现 {len(squashfs_files_list)} 个镜像文件')
        for file_info in squashfs_files_list:
            offset = file_info.get('offset')
            size = file_info.get('size')
            # Output name encodes the image's byte range within the container.
            range_tag = f"{hex(offset).replace('0x', '')}-{hex(offset + size).replace('0x', '')}"
            extract_squashfs_file(file_path, offset, size, range_tag)
|
||||
|
||||
# Write a squashfs image back into a programmer firmware or mtd4 partition.
def repack_firmware(input_image, target_file_path, replaced_image_file_index=0):
    """Overwrite a squashfs image inside *target_file_path* with *input_image*.

    By default the first image found is replaced; pass a positive
    *replaced_image_file_index* to pick another. A '.bak' copy of the target
    is made before writing. The region occupied by the original image is
    blanked to 0xFF before the new image is written, so stale trailing bytes
    cannot survive when the new image is smaller than the old one.
    """
    if not os.path.exists(input_image):
        print(f'待回写 {input_image} 文件不存在')
        return
    if not os.path.exists(target_file_path):
        print(f'回写目标 {target_file_path} 文件不存在')
        return

    squashfs_files_list = find_squashfs(target_file_path)
    if len(squashfs_files_list) > 0:
        replaced_image_file_info = squashfs_files_list[0]
        print(f'发现 {len(squashfs_files_list)} 个镜像文件')
        if replaced_image_file_index > 0:
            replaced_image_file_info = squashfs_files_list[replaced_image_file_index]
        original_image_offset = replaced_image_file_info.get('offset')
        original_image_size = replaced_image_file_info.get('size')

        input_image_size = os.path.getsize(input_image)
        if input_image_size > original_image_size:
            # Warn only; the caller may knowingly accept the risk.
            print(f'导入文件大小超过原始文件大小,可能导致文件结构破坏,建议新镜像文件≤原镜像文件')
        # Back up the target before modifying it in place.
        shutil.copy2(target_file_path, target_file_path + ".bak")
        with open(input_image, 'rb') as file_b:
            input_image_file_content = file_b.read()
        with open(target_file_path, 'r+b') as file_a:
            # Bug fix: the original built the 0xFF erase buffer but never
            # wrote it (and read the whole target into an unused variable).
            # Blank the full original region first, then write the new image.
            file_a.seek(original_image_offset)
            file_a.write(bytes([0xFF] * original_image_size))
            file_a.seek(original_image_offset)
            file_a.write(input_image_file_content)
        print(f'{input_image} 已回写到 {target_file_path}')
|
||||
|
||||
def unsquashfs(target_file_path):
    """Unpack a squashfs image with the external `unsquashfs` tool.

    Extracts into a 'squashfs-root' directory beside the image, prints the
    tool's output lines, and returns its stdout bytes.
    """
    if not os.path.exists(target_file_path):
        print(f'suqashfs 镜像 {target_file_path} 文件不存在')
        return
    parent_directory = os.path.dirname(target_file_path)
    target_squashfs_root_dir = os.path.join(parent_directory, "squashfs-root")

    # Equivalent to: unsquashfs -d <squashfs-root> <image>
    args = ["unsquashfs", "-d", target_squashfs_root_dir, target_file_path]
    process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    # On failure report stderr, otherwise stdout.
    response = err if process.returncode != 0 else out
    if response != b'':
        lines = [line for line in response.decode('utf-8').split('\r\r\n') if line]
        print(lines)
    return out
|
||||
|
||||
def mksquashfs(target_squashfs_root_dir):
    """Rebuild a squashfs image from a squashfs-root directory.

    Runs the external `mksquashfs` tool, writing 'new.squashfs' beside the
    root directory, prints the tool's output lines, and returns its stdout
    bytes.
    """
    if not os.path.exists(target_squashfs_root_dir):
        print(f'suqashfs-root 目录 {target_squashfs_root_dir} 文件不存在')
        return

    # mksquashfs squashfs-root/ new.squashfs -no-xattrs -b 262144 -comp xz -Xbcj armthumb -Xdict-size 256KiB -no-sparse
    parent_directory = os.path.dirname(target_squashfs_root_dir)
    new_squashfs_root_file = os.path.join(parent_directory, "new.squashfs")
    args = [
        "mksquashfs",
        target_squashfs_root_dir,
        new_squashfs_root_file,
        "-no-xattrs",
        "-b", "262144",
        "-comp", "xz",
        "-Xbcj", "armthumb",
        "-Xdict-size", "256KiB",
        "-no-sparse",
        "-noappend",
    ]
    process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    # On failure report stderr, otherwise stdout.
    response = err if process.returncode != 0 else out
    if response != b'':
        lines = [line for line in response.decode('utf-8').split('\r\r\n') if line]
        print(lines)
    return out
|
||||
|
||||
|
||||
|
||||
# Print command-line usage.
def help(file_name):
    """Print usage text for the tool invoked as *file_name*.

    NOTE(review): this shadows the builtin `help` at module scope; the name is
    kept because callers in this file use it.
    """
    usage_lines = [
        f"Usage: python script.py <operation> <firmware_file>\n",
        f"operations:\n",
        f" split split mtds from a firmware",
        f" eg: python3 {file_name} split c:\\full.bin\n",
        f" merge merge mtds from a directory to a firmware",
        f" eg: python3 {file_name} merge c:\\\n",
        f" unpack unpack a squashfs from mtd4 or firmware ",
        f" eg: python3 {file_name} unpack c:\\full.bin\n",
        f" eg: python3 {file_name} unpack c:\\mtd4\n",
        f" repack repack squashfs-root to a squashfs filesystem and put it into mtd4 or firmware",
        f" eg: python3 {file_name} repack c:\\squashfs-root c:\\full.bin\n",
        f" eg: python3 {file_name} repack c:\\squashfs-root c:\\mtd4\n",
    ]
    for usage_line in usage_lines:
        print(usage_line)
    return
|
||||
|
||||
|
||||
# CLI entry point.
def main():
    """Parse command-line arguments and dispatch to the firmware operations.

    Usage: <script> <operation> <firmware_file> [target_file] [image_index]
    """
    if len(sys.argv) < 3:
        help(script_name)
        return

    operation = sys.argv[1]
    file_path = sys.argv[2]

    if operation == "split":
        # Split a firmware image into its MTD partitions.
        split_firmware(file_path)
    elif operation == "merge":
        # Merge MTD partition files back into a firmware image.
        merge_firmware(file_path)
    elif operation == "unpack":
        # Extract squashfs image(s) from a firmware or mtd4 partition.
        unpack_firmware(file_path)
    elif operation == "repack":
        # Repack needs a target file as the third positional argument; an
        # optional fourth argument selects which embedded image to replace.
        # Bug fix: the original checked `!= 4`, printed the error, then fell
        # through and crashed with IndexError on sys.argv[3] when only three
        # arguments were given (and wrongly complained when five were given).
        if len(sys.argv) < 4:
            print(f"!!!!!!!! repack need at least 4 parameters\n\n")
            help(script_name)
            return
        target_file_path = sys.argv[3]
        replaced_image_file_index = 0
        if len(sys.argv) >= 5:
            replaced_image_file_index = int(sys.argv[4])
        repack_firmware(file_path, target_file_path, replaced_image_file_index)
    else:
        help(script_name)
|
||||
|
||||
|
||||
# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user