Capture DOS screen output from a command line

本文介绍了一种在VB中捕获DOS命令输出的方法,通过创建管道并使用特定API来读取输出,实现了同步运行DOS命令并返回屏幕输出的功能。

This code is extremely useful if you ever need to capture output from a DOS screen/console. The simple demonstration below, shows how to capture the output from a batch file.

Note, to redirect other handles (STDIN and STDERR), create a pipe for each handle for which redirection is desired. The code should read from the read ends of the pipes for the redirected STDOUT and STDERR. If STDIN redirection is desired, the code should write to the write end of the appropriate pipe.

Option Explicit

Private Declare Function CreatePipe Lib "kernel32" (phReadPipe As Long, phWritePipe As Long, lpPipeAttributes As Any, ByVal nSize As Long) As Long
Private Declare Function ReadFile Lib "kernel32" (ByVal hFile As Long, ByVal lpBuffer As String, ByVal nNumberOfBytesToRead As Long, lpNumberOfBytesRead As Long, ByVal lpOverlapped As Any) As Long
Private Declare Function GetNamedPipeInfo Lib "kernel32" (ByVal hNamedPipe As Long, lType As Long, lLenOutBuf As Long, lLenInBuf As Long, lMaxInstances As Long) As Long
  
'Win32 SECURITY_ATTRIBUTES: controls whether the object created with it
'(here, the anonymous pipe) can be inherited by a child process.
Private Type SECURITY_ATTRIBUTES
    nLength As Long                 'Size of this structure in bytes (set via Len())
    lpSecurityDescriptor As Long    'Pointer to a security descriptor; 0 = default security
    bInheritHandle As Long          'Non-zero so the child process inherits the pipe handles
End Type

'Win32 STARTUPINFO: describes the window state and standard handles for a
'process started with CreateProcess.
Private Type STARTUPINFO
    cb As Long                  'Size of this structure in bytes
    lpReserved As Long
    lpDesktop As Long
    lpTitle As Long
    dwX As Long                 'Window position/size (only with STARTF_USEPOSITION/USESIZE)
    dwY As Long
    dwXSize As Long
    dwYSize As Long
    dwXCountChars As Long       'Console screen-buffer size in characters
    dwYCountChars As Long
    dwFillAttribute As Long     'Initial console text/background colour
    dwFlags As Long             'STARTF_* flags selecting which members are honoured
    wShowWindow As Integer      'SW_* show state (requires STARTF_USESHOWWINDOW)
    cbReserved2 As Integer
    lpReserved2 As Long
    hStdInput As Long           'Standard handles (require STARTF_USESTDHANDLES)
    hStdOutput As Long          'Set to the pipe's write end in ShellExecuteCapture
    hStdError As Long
End Type

'Win32 PROCESS_INFORMATION: filled in by CreateProcess. Both handles must be
'released with CloseHandle once the caller is done with them.
Private Type PROCESS_INFORMATION
    hProcess As Long        'Handle to the new process (used by WaitForSingleObject)
    hThread As Long         'Handle to the new process's primary thread
    dwProcessID As Long     'Numeric process ID
    dwThreadID As Long      'Numeric thread ID
End Type

Private Declare Function WaitForSingleObject Lib "kernel32" (ByVal hHandle As Long, ByVal dwMilliseconds As Long) As Long
Private Declare Function CreateProcessA Lib "kernel32" (ByVal lpApplicationName As Long, ByVal lpCommandLine As String, lpProcessAttributes As Any, lpThreadAttributes As Any, ByVal bInheritHandles As Long, ByVal dwCreationFlags As Long, ByVal lpEnvironment As Long, ByVal lpCurrentDirectory As Long, lpStartupInfo As Any, lpProcessInformation As Any) As Long
Private Declare Function CloseHandle Lib "kernel32" (ByVal hObject As Long) As Long


'Purpose     :  Synchronously runs a DOS command line and returns the captured screen output.
'Inputs      :  sCommandLine                The DOS command line to run.
'               [bShowWindow]               If True displays the DOS output window.
'Outputs     :  Returns the screen output (empty string if any API call fails).
'Author      :  Andrew Baker
'Date        :  03/09/2000 14:17
'Notes       :  This routine will work only with those program that send their output to
'               the standard output device (stdout).
'               Windows NT ONLY.
'               NOTE(review): the routine waits INFINITE for the child to exit
'               BEFORE reading the pipe. If the child writes more than the pipe
'               buffer holds, its write blocks and this call hangs forever -
'               confirm output is small, or read concurrently with the wait.
'               NOTE(review): the write end of the pipe is never closed before
'               ReadFile, so ReadFile cannot detect end-of-pipe; the code relies
'               on GetNamedPipeInfo reporting the buffered byte count instead.
'Revisions   :

Function ShellExecuteCapture(sCommandLine As String, Optional bShowWindow As Boolean = False) As String
    'clReadBytes is unused; INFINITE = wait with no timeout
    Const clReadBytes As Long = 256, INFINITE As Long = &HFFFFFFFF
    Const STARTF_USESHOWWINDOW = &H1, STARTF_USESTDHANDLES = &H100&
    Const SW_HIDE = 0, SW_NORMAL = 1
    Const NORMAL_PRIORITY_CLASS = &H20&
   
    Const PIPE_CLIENT_END = &H0     'Handle refers to the client end of a named pipe instance (default)
    Const PIPE_SERVER_END = &H1     'Handle refers to the server end of a named pipe instance
    Const PIPE_TYPE_BYTE = &H0      'Byte-mode pipe (default)
    Const PIPE_TYPE_MESSAGE = &H4   'Message-mode pipe
   
   
    Dim tProcInfo As PROCESS_INFORMATION, lRetVal As Long, lSuccess As Long
    Dim tStartupInf As STARTUPINFO
    Dim tSecurAttrib As SECURITY_ATTRIBUTES, lhwndReadPipe As Long, lhwndWritePipe As Long
    Dim lBytesRead As Long, sBuffer As String
    Dim lPipeOutLen As Long, lPipeInLen As Long, lMaxInst As Long
   
    'Mark the pipe handles as inheritable so the child can use the write end
    tSecurAttrib.nLength = Len(tSecurAttrib)
    tSecurAttrib.bInheritHandle = 1&
    tSecurAttrib.lpSecurityDescriptor = 0&

    'Create an anonymous pipe; 0 = default buffer size
    lRetVal = CreatePipe(lhwndReadPipe, lhwndWritePipe, tSecurAttrib, 0)
    If lRetVal = 0 Then
        'CreatePipe failed - return empty string
        Exit Function
    End If

    'Route the child's stdout into the pipe's write end
    tStartupInf.cb = Len(tStartupInf)
    tStartupInf.dwFlags = STARTF_USESTDHANDLES Or STARTF_USESHOWWINDOW
    tStartupInf.hStdOutput = lhwndWritePipe
    If bShowWindow Then
        'Show the DOS window
        tStartupInf.wShowWindow = SW_NORMAL
    Else
        'Hide the DOS window
        tStartupInf.wShowWindow = SW_HIDE
    End If

    'bInheritHandles = 1 so the child inherits the pipe write handle.
    'The same SECURITY_ATTRIBUTES is reused for the process and thread attributes.
    lRetVal = CreateProcessA(0&, sCommandLine, tSecurAttrib, tSecurAttrib, 1&, NORMAL_PRIORITY_CLASS, 0&, 0&, tStartupInf, tProcInfo)
    If lRetVal <> 1 Then
        'CreateProcess failed - return empty string
        Exit Function
    End If
   
    'Process created, wait for completion. Note, this will cause your application
    'to hang indefinitely until this process completes.
    'Note, you could alternatively use a loop, or a timeout (in ms) (see the "ShellWait" function at
    'http://www.vbusers.com/code/codeget.asp?ThreadID=70&PostID=1 for details)
    WaitForSingleObject tProcInfo.hProcess, INFINITE
   
    'Query how many bytes are buffered in the pipe (lPipeOutLen).
    'NOTE(review): the second parameter of GetNamedPipeInfo is an output (flags);
    'passing the PIPE_TYPE_BYTE constant only works because VB passes a ByRef
    'temporary copy of the constant - confirm against the API documentation.
    lSuccess = GetNamedPipeInfo(lhwndReadPipe, PIPE_TYPE_BYTE, lPipeOutLen, lPipeInLen, lMaxInst)
    If lSuccess Then
        'Got pipe info, create a buffer sized to the pipe contents
        sBuffer = String(lPipeOutLen, 0)
        'Read Output Pipe
        lSuccess = ReadFile(lhwndReadPipe, sBuffer, lPipeOutLen, lBytesRead, 0&)
        If lSuccess = 1 Then
            'Pipe read successfully - trim to the bytes actually read
            ShellExecuteCapture = Left$(sBuffer, lBytesRead)
        End If
    End If
   
    'Close process, thread and pipe handles
    Call CloseHandle(tProcInfo.hProcess)
    Call CloseHandle(tProcInfo.hThread)
    Call CloseHandle(lhwndReadPipe)
    Call CloseHandle(lhwndWritePipe)
End Function
 

'Demonstration: capture and print the console output of a batch file.
'Prerequisite: create a file called "C:/test.bat" containing a single line:
'   dir *.*
Sub Test()
    Dim sOutput As String
    sOutput = ShellExecuteCapture("C:/test.bat", False)
    Debug.Print sOutput
End Sub

这个是库的 文档说明 react-native-vision Library for accessing VisionKit and visual applications of CoreML from React Native. iOS Only Incredibly super-alpha, and endeavors to provide a relatively thin wrapper between the underlying vision functionality and RN. Higher-level abstractions are @TODO and will be in a separate library. Installation yarn add react-native-vision react-native-swift react-native link Note react-native-swift is a peer dependency of react-native-vision. If you are running on a stock RN deployment (e.g. from react-native init) you will need to make sure your app is targeting IOS 11 or higher: yarn add react-native-fix-ios-version react-native link Since this module uses the camera, it will work much better on a device, and setting up permissions and codesigning in advance will help: yarn add -D react-native-camera-ios-enable yarn add -D react-native-setdevteam react-native link react-native setdevteam Then you are ready to run! react-native run-ios --device Command line - adding a Machine Learning Model with add-mlmodel react-native-vision makes it easier to bundle a pre-built machine learning model into your app. After installing, you will find the following command available: react-native add-mlmodel /path/to/mymodel.mlmodel You may also refere to the model from a URL, which is handy when getting something off the interwebs. For example, to apply the pre-built mobileNet model from apple, you can: react-native add-mlmodel https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel Note that the name of your model in the code will be the same as the filename minus the "mlmodel". In the above case, the model in code can be referenced as "MobileNet" Easy Start 1 : Full Frame Object Detection One of the most common easy use cases is just detecting what is in front of you. For this we use the VisionCamera component that lets you apply a model and get the classification via render props. 
Setup react-native init imagedetector; cd imagedetector yarn add react-native-swift react-native-vision yarn add react-native-fix-ios-version react-native-camera-ios-enable react-native-setdevteam react-native link react-native setdevteam Load your model with MobileNet A free download from Apple! react-native add-mlmodel https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel Add Some App Code import React from "react"; import { Text } from "react-native"; import { VisionCamera } from "react-native-vision"; export default () => ( <VisionCamera style={{ flex: 1 }} classifier="MobileNet"> {({ label, confidence }) => ( <Text style={{ width: "75%", fontSize: 50, position: "absolute", right: 50, bottom: 100 }} > {label + " :" + (confidence * 100).toFixed(0) + "%"} </Text> )} </VisionCamera> ); Easy Start 2: GeneratorView - for Style Transfer Most machine learning application are classifiers. But generators can be useful and a lot of fun. The GeneratorView lets you look at style transfer models that show how you can use deep learning techniques for creating whole new experiences. Setup react-native init styletest; cd styletest yarn add react-native-swift react-native-vision yarn add react-native-fix-ios-version react-native-camera-ios-enable react-native-setdevteam react-native link react-native setdevteam Load your model with add-mlmodel Apple has not published a style transfer model, but there are a few locations on the web where you can download them. Here is one: https://github.com/mdramos/fast-style-transfer-coreml So go to his github, navigate to his google drive, and then download the la_muse model to your personal Downloads directory. react-native add-mlmodel ~/Downloads/la_muse.mlmodel App Code This is the insanely short part. Note that the camera view is not necessary for viewing the style-transferred view: its just for reference. 
import React from "react"; import { GeneratorView, RNVCameraView } from "react-native-vision"; export default () => ( <GeneratorView generator="FNS-The-Scream" style={{ flex: 1 }}> <RNVCameraView style={{ position: "absolute", height: 200, width: 100, top: 0, right: 0 }} resizeMode="center" /> </GeneratorView> ); Easy Start 3: Face Camera Detect what faces are where in your camera view! Taking a page (and the model!) from (https://github.com/gantman/nicornot)[Gant Laborde's NicOrNot app], here is the entirety of an app that discerns whether the target is nicolas cage. Setup react-native init nictest; cd nictest yarn add react-native-swift react-native-vision yarn add react-native-fix-ios-version react-native-camera-ios-enable react-native-setdevteam react-native link react-native setdevteam Load your model with add-mlmodel react-native add-mlmodel https://s3.amazonaws.com/despiteallmyrage/MegaNic50_linear_5.mlmodel App Code import React from "react"; import { Text, View } from "react-native"; import { FaceCamera } from "react-native-vision"; import { Identifier } from "react-native-identifier"; export default () => ( <FaceCamera style={{ flex: 1 }} classifier="MegaNic50_linear_5"> {({ face, faceConfidence, style }) => face && (face == "nic" ? ( <Identifier style={{ ...style }} accuracy={faceConfidence} /> ) : ( <View style={{ ...style, justifyContent: "center", alignItems: "center" }} > <Text style={{ fontSize: 50, color: "red", opacity: faceConfidence }}> X </Text> </View> )) } </FaceCamera> ); Face Detection Component Reference FacesProvider Context Provider that extends <RNVisionProvider /> to detect, track, and identify faces. Props Inherits from <RNVisionProvider />, plus: interval: How frequently (in ms) to run the face detection re-check. (Basically lower values here keeps the face tracking more accurate) Default: 500 classifier: File URL to compiled MLModel (e.g. 
mlmodelc) that will be applied to detected faces updateInterval: How frequently (in ms) to update the detected faces - position, classified face, etc. Smaller values will mean smoother animation, but at the price of processor intensity. Default: 100 Example <FacesProvider isStarted={true} isCameraFront={true} classifier={this.state.classifier} > {/* my code for handling detected faces */} </FacesProvider> FacesConsumer Consumer of <FacesProvider /> context. As such, takes no props and returns a render prop function. Render Prop Members faces: Keyed object of information about the detected face. Elements of each object include: region: The key associated with this object (e.g. faces[k].region === k) x, y, height, width: Position and size of the bounding box for the detected face. faces: Array of top-5 results from face classifier, with keys label and confidence face: Label of top-scoring result from classifier (e.g. the face this is most likely to be) faceConfidence: Confidence score of top-scoring result above. Note that when there is no classifier specified, faces, face and faceConfidence are undefined Face Render prop generator to provision information about a single detected face. Can be instantiated by spread-propping the output of a single face value from <FacesConsumer> or by appling a faceID that maps to the key of a face. Returns null if no match. Props faceID: ID of the face (corresponding to the key of the faces object in FacesConsumer) Render Prop Members region: The key associated with this object (e.g. faces[k].region === k) x, y, height, width: Position and size of the bounding box for the detected face. Note These are adjusted for the visible camera view when you are rendering from that context. faces: Array of top-5 results from face classifier, with keys label and confidence face: Label of top-scoring result from classifier (e.g. the face this is most likely to be) faceConfidence: Confidence score of top-scoring result above. 
Note These arguments are the sam Faces A render-prop generator to provision information about all detected faces. Will map all detected faces into <Face> components and apply the children prop to each, so you have one function to generate all your faces. Designed to be similar to FlatMap implentation. Required Provider Context This component must be a descendant of a <FacesProvider> Props None Render Prop Members Same as <Face> above, but output will be mapped across all detected faces. Example of use is in the primary Face Recognizer demo code above. Props faceID: ID of the face applied. isCameraView: Whether the region frame information to generate should be camera-aware (e.g. is it adjusted for a preview window or not) Render Props This largely passes throught the members of the element that you could get from the faces collection from FaceConsumer, with the additional consideration that when isCameraView is set, style: A spreadable set of styling members to position the rectangle, in the same style as a RNVCameraRegion If faceID is provided but does not map to a member of the faces collection, the function will return null. Core Component References The package exports a number of components to facilitate the vision process. Note that the <RNVisionProvider /> needs to be ancestors to any others in the tree. So a simple single-classifier using dominant image would look something like: <RNVisionProvider isStarted={true}> <RNVDefaultRegion classifiers={[{url: this.state.FileUrlOfClassifier, max: 5}]}> {({classifications})=>{ return ( <Text> {classifications[this.state.FileUrlOfClassifier][0].label} </Text> }} </RNVDefaultRegion> </RNVisionProvider> RNVisionProvider Context provider for information captured from the camera. Allows the use of regional detection methods to initialize identification of objects in the frame. Props isStarted: Whether the camera should be activated for vision capture. Boolean isCameraFront: Facing of the camera. 
False for the back camera, true to use the front. Note only one camera facing can be used at a time. As of now, this is a hardware limitation. regions: Specified regions on the camera capture frame articulated as {x,y,width,height} that should always be returned by the consumer trackedObjects: Specified regions that should be tracked as objects, so that the regions returned match these object IDs and show current position. onRegionsChanged: Fires when the list of regions has been altered onDetectedFaces: Fires when the number of detected faces has changed Class imperative member detectFaces: Triggers one call to detect faces based on current active frame. Directly returns locations. RNVisionConsumer Consumer partner of RNVisionProvider. Must be its descendant in the node tree. Render Prop Members imageDimensions: Object representing size of the camera frame in {width, height} isCameraFront: Relaying whether camera is currently in selfie mode. This is important if you plan on displaying camera output, because in selfie mode a preview will be mirrored. regions: The list of detected rectangles in the most recently captured frame, where detection is driven by the RNVisionProvider props RNVRegion Props region: ID of the region (Note the default region, which is the whole frame, has an id of "" - blank.) classifiers: CoreML classifiers passed as file URLs to the classifier mlmodelc itself. Array generators: CoreML image generators passed as file URLs to the classifier mlmodelc itself. Array generators: CoreML models that generate a collection of output values passed as file URLs to the classifier mlmodelc itself. bottlenecks: A collection of CoreML models that take other CoreML model outputs as their inputs. Keys are the file URLs of the original models (that take an image as their input) and values are arrays of mdoels that generate the output passed via render props. 
onFrameCaptured: Callback to fire when a new image of the current frame in this region has been captured. Making non-null activates frame capture, setting to null turns it off. The callback passes a URL of the saved frame image file. Render Prop members key: ID of the region x, y, width, height: the elements of the frame containing the region. All values expressed as percentages of the overall frame size, so a 50x100 frame at origin 5,10 in a 500x500 frame would come across as {x: 0.01, y: 0.02, width: .1, height: .2}. Changes in these values are often what drives the re-render of the component (and therefore re-run of the render prop) confidence: If set, the confidence that the object identified as key is actually at this location. Used by tracked objects API of iOS Vision. Sometimes null. classifications: Collection, keyed by the file URL of the classifier passed in props, of collections of labels and probabilities. (e.g. {"file:///path/to/myclassifier.mlmodelc": {"label1": 0.84, "label2": 0.84}}) genericResults: Collection of generic results returned from generic models passed in via props to the region RNVDefaultRegion Convenience region that references the full frame. Same props as RNVRegion, except region is always set to "" - the full frame. Useful for simple style transfers or "dominant image" classifiers. Props Same as RNVRegion, with the exception that region is forced to "" Render Prop Members Same as RNVRegion, with the note that key will always be "" RNVCameraView Preview of the camera captured by the RNVisionProvider. Note that the preview is flipped in selfie mode (e.g. when isCameraFront is true) Props The properties of a View plus: gravity: how to scale the captured camera frame in the view. String. Valid values: fill: Fills the rectangle much like the "cover" in an Image resize: Leaves transparent (or style:{backgroundColor}) the parts of the rectangle that are left over from a resized version of the image. 
RNVCameraConsumer Render prop consumer for delivering additional context that regions will find helpful, mostly for rendering rectangles that map to the regions identified. Render Prop Members viewPortDimensions: A collection of {width, height} of the view rectangle. viewPortGravity: A pass-through of the gravity prop to help decide how to manage the math converting coordinates. RNVCameraRegion A compound consumer that blends the render prop members of RNVRegion and RNVCameraConsumer and adds a style prop that can position the region on a specified camera preview Props Same as RNVRegion Render Prop Members Includes members from RNVRegion and RNVCameraConsumer and adds: style: A pre-built colleciton of style prop members {position, width, height, left, top} that are designed to act in the context of the RNVCameraView rectangle. Spread-prop with your other style preferences (border? backgroundColor?) for easy on-screen representation. RNVImageView View for displaying output of image generators. Link it to , and the resulting image will display in this view. Useful for style transfer models. More performant because there is no round trip to JavaScript notifying of each frame update. Props id: the ID of an image generator model attached to a region. Usually is the file:/// URL of the .mlmodelc. Otherwise conforms to Image and View API. 请叫我如何做
11-06
import datetime
import os
import subprocess
import threading
import tkinter as tk
from tkinter import scrolledtext, ttk, filedialog, messagebox


class ADBBackupTool:
    """Tkinter GUI for backing up flash partitions from Amlogic ("晶晨") or
    HiSilicon ("海思") Android boxes over ADB.

    Left pane: console-style log. Right pane: connection settings, quick
    query commands, and a scrollable checklist of detected partitions.
    Long-running adb commands execute on daemon worker threads; log/UI
    updates are marshalled back to the Tk main loop via root.after().
    """

    # Placeholder text shown in the manual-command entry until edited.
    CMD_PLACEHOLDER = "可手动输入"

    def __init__(self, root):
        self.root = root
        root.title("晶晨/海思ADB备份工具")

        def center_window(width, height, y_offset=0):
            # Geometry string centering the window, shifted by y_offset px.
            screen_width = root.winfo_screenwidth()
            screen_height = root.winfo_screenheight()
            x = (screen_width - width) // 2
            y = (screen_height - height) // 2 + y_offset
            return f"{width}x{height}+{x}+{y}"

        root.geometry(center_window(1200, 700, -50))

        # Shared right-click context menu (copy / paste / select-all).
        self.context_menu = tk.Menu(root, tearoff=0)
        self.context_menu.add_command(label="复制", command=lambda: self.copy_text())
        self.context_menu.add_command(label="粘贴", command=lambda: self.paste_text())
        self.context_menu.add_separator()
        self.context_menu.add_command(label="全选", command=lambda: self.select_all())

        self.main_frame = tk.Frame(root)
        self.main_frame.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)
        self.main_frame.columnconfigure(0, weight=1)
        self.main_frame.rowconfigure(0, weight=1)
        RIGHT_PANE_WIDTH = 300

        # Left pane: command log.
        self.cmd_frame = tk.Frame(self.main_frame)
        self.cmd_frame.grid(row=0, column=0, sticky="nsew")
        self.cmd_label = tk.Label(self.cmd_frame, text="命令窗口", font=('Arial', 10, 'bold'))
        self.cmd_label.pack(anchor='w')
        self.cmd_text = scrolledtext.ScrolledText(
            self.cmd_frame, wrap=tk.WORD, font=('Consolas', 10),
            bg='black', fg='white', insertbackground='white'
        )
        self.cmd_text.pack(fill=tk.BOTH, expand=True)
        self.cmd_text.bind("<Button-3>", self.show_context_menu)

        # Right pane: fixed-width function panel.
        self.function_frame = tk.Frame(self.main_frame, width=RIGHT_PANE_WIDTH)
        self.function_frame.grid(row=0, column=1, sticky="nsew", padx=(5, 0))
        self.function_frame.pack_propagate(False)

        # Connection settings (IP / port / connect button).
        self.conn_frame = tk.LabelFrame(self.function_frame, text="连接设置", padx=5, pady=5)
        self.conn_frame.pack(fill=tk.X, pady=(0, 5))
        ip_port_frame = tk.Frame(self.conn_frame)
        ip_port_frame.pack(fill=tk.X, pady=2)
        tk.Label(ip_port_frame, text="IP地址:").pack(side=tk.LEFT, padx=(0, 5))
        self.ip_entry = tk.Entry(ip_port_frame, width=15)
        self.ip_entry.pack(side=tk.LEFT, padx=(0, 10))
        self.ip_entry.insert(0, "192.168.31.200")
        self.ip_entry.bind("<Button-3>", self.show_context_menu)
        tk.Label(ip_port_frame, text="端口号:").pack(side=tk.LEFT, padx=(0, 5))
        self.port_entry = tk.Entry(ip_port_frame, width=9)
        self.port_entry.pack(side=tk.LEFT)
        self.port_entry.insert(0, "5555")
        self.port_entry.bind("<Button-3>", self.show_context_menu)
        self.connect_btn = tk.Button(
            self.conn_frame, text="连接设备", command=self.connect_device,
            bg="#4CAF50", fg="white"
        )
        self.connect_btn.pack(fill=tk.X, pady=(5, 0))

        # Quick-query: preset commands plus a free-form entry.
        self.query_frame = tk.LabelFrame(self.function_frame, text="快速查询", padx=5, pady=5)
        self.query_frame.pack(fill=tk.X, pady=(0, 5))
        self.cmd_var = tk.StringVar()
        self.cmd_combobox = ttk.Combobox(
            self.query_frame, textvariable=self.cmd_var,
            values=["请选择命令", "晶晨分区获取", "晶晨dtb备份到桌面", "海思分区获取",
                    "海思分区获取2", "保存海思分区表", "U盘路径查询"],
            state="readonly", height=5, width=25
        )
        self.cmd_combobox.pack(fill=tk.X, pady=2)
        self.cmd_combobox.current(0)  # default selection: "请选择命令"
        self.cmd_combobox.bind("<<ComboboxSelected>>", self.update_cmd_entry)
        # Preset label -> shell command. "save_hisilicon_partitions" is a
        # sentinel handled locally, not passed to the shell.
        self.cmd_mapping = {
            "晶晨分区获取": "adb shell ls /dev/block",
            "晶晨dtb备份到桌面": 'adb pull "/dev/dtb" "C:\\Users\\Administrator\\Desktop\\dtb"',
            "海思分区获取": 'adb shell "cd /dev/block/platform/soc/by-name && ls -l"',
            "海思分区获取2": "adb shell cat /proc/partitions ",
            "保存海思分区表": "save_hisilicon_partitions",
            "U盘路径查询": "adb shell df",
        }
        self.cmd_entry = tk.Entry(self.query_frame)
        self.cmd_entry.pack(fill=tk.X, pady=2)
        self.cmd_entry.insert(0, self.CMD_PLACEHOLDER)
        self.cmd_entry.bind("<Button-3>", self.show_context_menu)
        self.cmd_entry.bind("<FocusIn>", self.on_entry_focus_in)
        self.cmd_entry.bind("<Key>", self.on_entry_key_press)
        self.run_cmd_btn = tk.Button(
            self.query_frame, text="执行命令", command=self.execute_custom_cmd,
            bg="#2196F3", fg="white"
        )
        self.run_cmd_btn.pack(fill=tk.X, pady=(0, 5))

        # Scrollable partition checklist (canvas + inner frame idiom).
        self.partition_select_frame = tk.LabelFrame(
            self.function_frame, text="选择要备份的分区", padx=5, pady=5
        )
        self.partition_select_frame.pack(fill=tk.BOTH, expand=True)
        self.partition_container = tk.Frame(self.partition_select_frame)
        self.partition_container.pack(fill=tk.BOTH, expand=True)
        self.partition_scrollbar = ttk.Scrollbar(self.partition_container)
        self.partition_scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
        self.partition_canvas = tk.Canvas(
            self.partition_container, yscrollcommand=self.partition_scrollbar.set,
            bg='#f0f0f0', highlightthickness=0
        )
        self.partition_canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
        self.partition_scrollbar.config(command=self.partition_canvas.yview)
        self.partition_list_frame = tk.Frame(self.partition_canvas, bg='#f0f0f0')
        self.partition_canvas.create_window((0, 0), window=self.partition_list_frame, anchor="nw")
        self.partition_list_frame.bind(
            "<Configure>",
            lambda e: self.partition_canvas.configure(
                scrollregion=self.partition_canvas.bbox("all")
            )
        )
        self.partition_vars = {}   # partition name -> tk.BooleanVar (checked state)
        self.chip_type = None      # "晶晨" / "海思" once a partition table is parsed
        self.select_all_var = tk.BooleanVar()
        self.select_all_check = tk.Checkbutton(
            self.partition_list_frame, text="全选", variable=self.select_all_var,
            command=self.toggle_select_all, bg='#f0f0f0', anchor='w', font=('Arial', 10)
        )
        self.select_all_check.pack(fill=tk.X, pady=2, padx=5)
        self.backup_btn = tk.Button(
            self.function_frame, text="备份选中分区", command=self.start_backup,
            bg="#9C27B0", fg="white"
        )
        self.backup_btn.pack(fill=tk.X, pady=(5, 0))

        self.status_bar = tk.Label(root, text="就绪", bd=1, relief=tk.SUNKEN, anchor=tk.W)
        self.status_bar.pack(side=tk.BOTTOM, fill=tk.X)
        self.check_adb()

    def save_hisilicon_partitions(self):
        """Save the HiSilicon partition table (log lines containing "->")
        to 海思分区表.txt on the current user's desktop."""
        if not hasattr(self, 'chip_type') or self.chip_type != "海思":
            messagebox.showerror("错误", "请先获取海思分区表")
            return
        # Fix: resolve the current user's desktop instead of the original
        # hard-coded C:\Users\Administrator path.
        desktop_path = os.path.join(os.path.expanduser("~"), "Desktop")
        save_path = os.path.join(desktop_path, "海思分区表.txt")
        try:
            content = self.cmd_text.get("1.0", tk.END)
            partition_lines = [line for line in content.split('\n') if "->" in line]
            if not partition_lines:
                messagebox.showerror("错误", "未找到分区信息")
                return
            with open(save_path, 'w', encoding='utf-8') as f:
                f.write("海思设备分区表\n")
                f.write(f"生成时间: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write("=" * 50 + "\n")
                for line in partition_lines:
                    f.write(line + "\n")
            self.append_cmd_output("分区表已保存到桌面: 海思分区表.txt")
            messagebox.showinfo("成功", "分区表已保存到桌面:\n海思分区表.txt")
        except Exception as e:
            self.append_cmd_output(f"保存分区表出错: {str(e)}")
            messagebox.showerror("错误", f"保存分区表出错:\n{str(e)}")

    def on_entry_focus_in(self, event):
        """Clear the placeholder when the command entry gains focus.

        Fix: the original compared against "请选择命令或手动输入", which never
        matched the placeholder actually inserted, so it was never cleared.
        """
        if self.cmd_entry.get() == self.CMD_PLACEHOLDER:
            self.cmd_entry.delete(0, tk.END)

    def on_entry_key_press(self, event):
        """Clear the placeholder on the first keystroke (same fix as above)."""
        if self.cmd_entry.get() == self.CMD_PLACEHOLDER:
            self.cmd_entry.delete(0, tk.END)

    def update_cmd_entry(self, event):
        """Copy the shell command mapped to the selected preset into the entry."""
        selected = self.cmd_combobox.get()
        self.cmd_entry.delete(0, tk.END)
        self.cmd_entry.insert(0, self.cmd_mapping.get(selected, ""))

    def show_context_menu(self, event):
        """Pop up the shared context menu over the widget that was clicked."""
        self.current_widget = event.widget
        try:
            self.current_widget.focus_set()
            self.context_menu.post(event.x_root, event.y_root)
        except Exception as e:
            print(f"显示右键菜单出错: {e}")
        return "break"  # stop further <Button-3> handling

    def copy_text(self):
        """Copy the selection of the last right-clicked entry/text widget."""
        try:
            if isinstance(self.current_widget, (tk.Entry, scrolledtext.ScrolledText)):
                self.current_widget.event_generate("<<Copy>>")
        except Exception as e:
            print(f"复制出错: {e}")

    def paste_text(self):
        """Paste the clipboard into the last right-clicked entry/text widget."""
        try:
            if isinstance(self.current_widget, (tk.Entry, scrolledtext.ScrolledText)):
                self.current_widget.event_generate("<<Paste>>")
        except Exception as e:
            print(f"粘贴出错: {e}")

    def select_all(self):
        """Select all text in the last right-clicked entry/text widget."""
        try:
            if isinstance(self.current_widget, tk.Entry):
                self.current_widget.select_range(0, tk.END)
                self.current_widget.icursor(tk.END)
            elif isinstance(self.current_widget, scrolledtext.ScrolledText):
                self.current_widget.tag_add(tk.SEL, "1.0", tk.END)
                self.current_widget.mark_set(tk.INSERT, "1.0")
                self.current_widget.see(tk.INSERT)
        except Exception as e:
            print(f"全选出错: {e}")

    def check_adb(self):
        """Verify that the adb binary is on PATH; report the result in the log."""
        try:
            result = subprocess.run("adb version", shell=True, capture_output=True, text=True)
            if "Android Debug Bridge" in result.stdout:
                self.append_cmd_output("ADB已就绪\n" + result.stdout.split('\n')[0])
            else:
                self.append_cmd_output("错误: ADB未正确安装")
                messagebox.showerror("错误", "ADB未正确安装,请先安装ADB工具")
        except Exception as e:
            self.append_cmd_output(f"检查ADB出错: {str(e)}")
            messagebox.showerror("错误", f"检查ADB出错: {str(e)}")

    def execute_custom_cmd(self):
        """Run the command typed/selected in the entry on a worker thread."""
        cmd = self.cmd_entry.get()
        if not cmd:
            self.append_cmd_output("错误: 请输入要执行的命令")
            return
        if cmd == "save_hisilicon_partitions":
            # Local sentinel action, not a shell command.
            self.save_hisilicon_partitions()
            return
        self.append_cmd_output(f"执行命令: {cmd}")
        threading.Thread(target=self._execute_cmd, args=(cmd,), daemon=True).start()

    def _execute_cmd(self, cmd):
        """Worker: run `cmd` in a shell, stream output lines to the log, then
        trigger partition parsing for the known partition-listing commands."""
        try:
            process = subprocess.Popen(
                cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                text=True, bufsize=1, universal_newlines=True
            )
            while True:
                output = process.stdout.readline()
                if output == '' and process.poll() is not None:
                    break
                if output:
                    self.root.after(0, self.append_cmd_output, output.strip())
            stderr = process.stderr.read()
            if stderr:
                self.root.after(0, self.append_cmd_output, f"错误:\n{stderr.strip()}")
            self.root.after(0, self.append_cmd_output, f"命令执行完成,返回值: {process.returncode}")
            if "ls /dev/block" in cmd or "by-name" in cmd:
                # NOTE(review): this manipulates Tk widgets from the worker
                # thread and re-runs the adb command a second time — confirm
                # whether this should be rescheduled onto the main loop.
                self._parse_partitions(cmd)
        except Exception as e:
            self.root.after(0, self.append_cmd_output, f"执行命令出错: {str(e)}")

    def _parse_partitions(self, cmd):
        """Re-run `cmd`, parse partition names from its output, and rebuild
        the partition checklist (keeping the leading 全选 checkbox)."""
        for widget in self.partition_list_frame.winfo_children()[1:]:
            widget.destroy()
        self.partition_vars.clear()
        partitions = []
        if "by-name" in cmd:
            # HiSilicon: by-name entries are symlinks, "name -> /dev/block/..."
            self.chip_type = "海思"
            self.append_cmd_output("检测到海思设备分区表")
            result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
            for line in result.stdout.split('\n'):
                if "->" in line:
                    part_name = line.split("->")[-1].split("/")[-1].strip()
                    if part_name:
                        partitions.append(part_name)
        else:
            # Amlogic: plain `ls /dev/block` listing; last token is the name.
            self.chip_type = "晶晨"
            self.append_cmd_output("检测到晶晨设备分区表")
            result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
            for line in result.stdout.split('\n'):
                if line and not line.startswith("total") and not line.startswith("ls:"):
                    part_name = line.split()[-1]
                    if part_name and part_name not in ["", "by-name", "platform"]:
                        partitions.append(part_name)
        if partitions:
            self._display_partitions(partitions)
            self.append_cmd_output("分区列表已更新,请在右侧选择要备份的分区")
        else:
            self.append_cmd_output("未找到分区信息")

    def connect_device(self):
        """Validate the IP/port fields and start `adb connect` on a worker thread."""
        ip = self.ip_entry.get()
        port = self.port_entry.get()
        if not ip or not port:
            self.append_cmd_output("错误: 请输入IP地址和端口号")
            return
        self.append_cmd_output(f"尝试连接设备: {ip}:{port}")
        self.connect_btn.config(state=tk.DISABLED, text="连接中...")
        threading.Thread(target=self._adb_connect, args=(ip, port), daemon=True).start()

    def _adb_connect(self, ip, port):
        """Worker: run `adb connect ip:port`, stream output, update status bar.

        NOTE(review): `adb connect` often exits 0 even when the connection
        fails — the returncode check may report success spuriously; the
        stdout text is the reliable signal.
        """
        try:
            cmd = f"adb connect {ip}:{port}"
            self.append_cmd_output(f"执行: {cmd}")
            process = subprocess.Popen(
                cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                text=True, bufsize=1, universal_newlines=True
            )
            while True:
                output = process.stdout.readline()
                if output == '' and process.poll() is not None:
                    break
                if output:
                    self.root.after(0, self.append_cmd_output, output.strip())
            stderr = process.stderr.read()
            if stderr:
                self.root.after(0, self.append_cmd_output, f"错误:\n{stderr.strip()}")
            if process.returncode == 0:
                self.root.after(0, self.status_bar.config, {"text": f"已连接: {ip}:{port}"})
            else:
                self.root.after(0, self.append_cmd_output, "连接失败!")
        except Exception as e:
            self.root.after(0, self.append_cmd_output, f"连接出错: {str(e)}")
        finally:
            self.root.after(0, self.connect_btn.config, {"state": tk.NORMAL, "text": "连接设备"})

    def _display_partitions(self, partitions):
        """Create one checkbox per partition name and refresh the scroll region."""
        for part in partitions:
            self.partition_vars[part] = tk.BooleanVar()
            cb = tk.Checkbutton(
                self.partition_list_frame, text=part, variable=self.partition_vars[part],
                anchor='w', bg='#f0f0f0', font=('Arial', 10)
            )
            cb.pack(fill=tk.X, pady=2, padx=5)
        self.partition_list_frame.update_idletasks()
        self.partition_canvas.config(scrollregion=self.partition_canvas.bbox("all"))
        # Mouse-wheel scrolling; delta/120 is the Windows wheel-step convention.
        self.partition_canvas.bind_all(
            "<MouseWheel>",
            lambda event: self.partition_canvas.yview_scroll(
                int(-1 * (event.delta / 120)), "units"
            )
        )

    def toggle_select_all(self):
        """Propagate the 全选 checkbox state to every partition checkbox."""
        select_all = self.select_all_var.get()
        for var in self.partition_vars.values():
            var.set(select_all)

    def start_backup(self):
        """Ask for a destination directory and start the backup worker thread."""
        selected_partitions = [part for part, var in self.partition_vars.items() if var.get()]
        if not selected_partitions:
            messagebox.showwarning("警告", "请至少选择一个分区进行备份")
            return
        backup_dir = filedialog.askdirectory(title="选择备份保存目录")
        if not backup_dir:
            self.append_cmd_output("备份已取消")
            return
        self.append_cmd_output(f"将备份保存到: {backup_dir}")
        self.backup_btn.config(state=tk.DISABLED, text="备份中,请稍等...")
        threading.Thread(
            target=self._perform_backup, args=(selected_partitions, backup_dir), daemon=True
        ).start()

    def _perform_backup(self, partitions, backup_dir):
        """Worker: dd+gzip each selected partition to /sdcard/backup on the
        device, pull the directory to `backup_dir`, then clean up."""
        try:
            self.append_cmd_output("准备备份环境...")
            self._execute_adb_command("adb shell \"rm -rf /sdcard/backup\"")
            self._execute_adb_command("adb shell \"mkdir -p /sdcard/backup\"")
            for i, partition in enumerate(partitions):
                progress = (i + 1) / len(partitions) * 100
                self.append_cmd_output(
                    f"正在备份 {partition} ({i + 1}/{len(partitions)}) - {progress:.1f}%"
                )
                backup_cmd = (
                    f"adb shell \"dd if=/dev/block/{partition} | "
                    f"gzip -c > /sdcard/backup/{partition}.img.gz\""
                )
                self._execute_adb_command(backup_cmd)
            self.append_cmd_output("正在从设备下载备份文件...")
            pull_cmd = f"adb pull /sdcard/backup \"{backup_dir}\""
            self._execute_adb_command(pull_cmd)
            self._execute_adb_command("adb shell \"rm -rf /sdcard/backup\"")
            self.append_cmd_output(f"备份完成! 文件已保存到: {backup_dir}")
            messagebox.showinfo("完成", f"备份已完成,文件保存在:\n{backup_dir}")
        except Exception as e:
            self.append_cmd_output(f"备份出错: {str(e)}")
            messagebox.showerror("错误", f"备份过程中出错:\n{str(e)}")
        finally:
            self.root.after(0, self.backup_btn.config, {"state": tk.NORMAL, "text": "备份选中分区"})

    def _execute_adb_command(self, cmd):
        """Run one adb command, stream its output to the log, and raise if it
        exits non-zero (aborts the enclosing backup)."""
        self.append_cmd_output(f"执行: {cmd}")
        process = subprocess.Popen(
            cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            text=True, bufsize=1, universal_newlines=True
        )
        while True:
            output = process.stdout.readline()
            if output == '' and process.poll() is not None:
                break
            if output:
                self.root.after(0, self.append_cmd_output, output.strip())
        stderr = process.stderr.read()
        if stderr:
            self.root.after(0, self.append_cmd_output, f"错误:\n{stderr.strip()}")
        if process.returncode != 0:
            raise Exception(f"命令执行失败: {cmd}")

    def append_cmd_output(self, text):
        """Append a line to the log widget and keep the newest line visible."""
        self.cmd_text.insert(tk.END, text + "\n")
        self.cmd_text.see(tk.END)


if __name__ == "__main__":
    root = tk.Tk()
    app = ADBBackupTool(root)
    root.mainloop()

# Original author's note (translated): next step was to add HTTP download /
# sparse-image format support on top of this — deferred for now.
07-24
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值