SAM2 model inference with onnxruntime and tensorrt


onnxruntime inference

SAM2 is split into two ONNX models here: an image encoder that produces an image embedding plus two high-resolution feature maps, and a prompt decoder that turns point/box prompts into segmentation masks.

sam2_ort.py

import cv2
import numpy as np
import onnxruntime


class SAM2ImageEncoder:
    def __init__(self, path) -> None:
        self.session = onnxruntime.InferenceSession(path, providers=onnxruntime.get_available_providers())
        model_inputs = self.session.get_inputs()
        self.input_names = [model_inputs[i].name for i in range(len(model_inputs))]
        self.input_shape = model_inputs[0].shape
        self.input_height = self.input_shape[2]
        self.input_width = self.input_shape[3]
        model_outputs = self.session.get_outputs()
        self.output_names = [model_outputs[i].name for i in range(len(model_outputs))]

    def encode_image(self, image):
        input_tensor = self.prepare_input(image)
        outputs = self.infer(input_tensor)
        return outputs[0], outputs[1], outputs[2]

    def prepare_input(self, image):
        input_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        input_img = cv2.resize(input_img, (self.input_width, self.input_height))
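        # Normalize with the ImageNet mean/std used at training time (values are for 0-255 pixels)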
        input_img = (input_img - np.array([123.675, 116.28, 103.53])) / np.array([58.395, 57.12, 57.375])
        input_img = input_img.transpose(2, 0, 1)
        input_tensor = input_img[np.newaxis, :, :, :].astype(np.float32)
        return input_tensor

    def infer(self, input_tensor):
        return self.session.run(self.output_names, {self.input_names[0]: input_tensor})


class SAM2ImageDecoder:
    def __init__(self, path, encoder_input_size, orig_im_size, mask_threshold = 0.0):
        self.session = onnxruntime.InferenceSession(path, providers=onnxruntime.get_available_providers())
        self.orig_im_size = orig_im_size if orig_im_size is not None else encoder_input_size
        self.encoder_input_size = encoder_input_size
        self.mask_threshold = mask_threshold
        self.scale_factor = 4
        model_inputs = self.session.get_inputs()
        self.input_names = [model_inputs[i].name for i in range(len(model_inputs))]
        model_outputs = self.session.get_outputs()
        self.output_names = [model_outputs[i].name for i in range(len(model_outputs))]

    def predict(self, image_embed, high_res_feats_0, high_res_feats_1, point_coords, point_labels):
        inputs = self.prepare_inputs(image_embed, high_res_feats_0, high_res_feats_1, point_coords, point_labels)
        outputs = self.infer(inputs)
        return self.process_output(outputs)

    def prepare_inputs(self, image_embed, high_res_feats_0, high_res_feats_1, point_coords, point_labels):
        input_point_coords, input_point_labels = self.prepare_points(point_coords, point_labels)
        num_labels = input_point_labels.shape[0]
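        # No previous low-res mask is fed back in, so pass zeros and set has_mask_input to 0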
        mask_input = np.zeros((num_labels, 1, self.encoder_input_size[0] // self.scale_factor, self.encoder_input_size[1] // self.scale_factor), dtype=np.float32)
        has_mask_input = np.array([0], dtype=np.float32)
        original_size = np.array([self.orig_im_size[0], self.orig_im_size[1]], dtype=np.int32)
        return image_embed, high_res_feats_0, high_res_feats_1, input_point_coords, input_point_labels, mask_input, has_mask_input, original_size

    def prepare_points(self, point_coords, point_labels):
        input_point_coords = point_coords[np.newaxis, ...]
        input_point_labels = point_labels[np.newaxis, ...]
        input_point_coords[..., 0] = input_point_coords[..., 0] / self.orig_im_size[1] * self.encoder_input_size[1]  # scale x to encoder input width
        input_point_coords[..., 1] = input_point_coords[..., 1] / self.orig_im_size[0] * self.encoder_input_size[0]  # scale y to encoder input height
        return input_point_coords.astype(np.float32), input_point_labels.astype(np.float32)

    def infer(self, inputs):
        return self.session.run(self.output_names, {self.input_names[i]: inputs[i] for i in range(len(self.input_names))})

    def process_output(self, outputs):
        scores = outputs[1].squeeze()
        masks = outputs[0] > self.mask_threshold
        masks = masks.astype(np.uint8).squeeze()
        return masks, scores


class SAM2Image_ORT:
    def __init__(self, encoder_path, decoder_path):
        self.encoder = SAM2ImageEncoder(encoder_path)
        self.decoder_path = decoder_path
        self.decoder = {}
        self.point_coords = {}
        self.point_labels = {}
        self.box_coords = {}
        self.masks = {}

    def set_image(self, image):
        self.image_embeddings = self.encoder.encode_image(image)
        self.orig_im_size = (image.shape[0], image.shape[1])

    def set_point(self, point_coords, label_id):
        self.decoder[label_id] = SAM2ImageDecoder(self.decoder_path, self.encoder.input_shape[2:], self.orig_im_size)
        self.point_coords[label_id] = np.array([point_coords])
        self.point_labels[label_id] = np.array([1])
        return self.decode_mask(label_id)
    
    def set_box(self, box_coords):
        self.decoder[0] = SAM2ImageDecoder(self.decoder_path, self.encoder.input_shape[2:], self.orig_im_size)
        self.box_coords[0] = np.array([[box_coords[0], box_coords[1]], [box_coords[2], box_coords[3]]]) 
        return self.decode_mask(0)

    def decode_mask(self, label_id):
        concat_coords = []
        concat_labels = []
        if label_id in self.point_coords:
            concat_coords.append(self.point_coords[label_id])
            concat_labels.append(self.point_labels[label_id])
        if label_id in self.box_coords:
            concat_coords.append(self.box_coords[label_id])
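            # Labels 2 and 3 mark the box's top-left and bottom-right corners in the SAM prompt encoding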
            concat_labels.append(np.array([2, 3]))
        concat_coords = np.concatenate(concat_coords, axis=0)
        concat_labels = np.concatenate(concat_labels, axis=0)
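        # Encoder outputs arrive in order: high_res_feats_0, high_res_feats_1, image_embed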
        high_res_feats_0, high_res_feats_1, image_embed = self.image_embeddings
        mask, _ = self.decoder[label_id].predict(image_embed, high_res_feats_0, high_res_feats_1, concat_coords, concat_labels)
        mask = cv2.resize(mask, (self.orig_im_size[1], self.orig_im_size[0]), interpolation=cv2.INTER_LINEAR)
        self.masks[label_id] = mask
        return self.masks

inference.py

import cv2
import numpy as np
from sam2_ort import SAM2Image_ORT

image = cv2.imread("dog.jpg")
sam2 = SAM2Image_ORT("sam2.1_hiera_base_plus_encoder.onnx",  "sam2.1_hiera_base_plus_decoder.onnx")
sam2.set_image(image)

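# Prompt points are (x, y) pixel coordinates in the original image; box is [x1, y1, x2, y2]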
points = [[200, 500], [600, 240]]
box = [0, 219, 417, 528] 
masks = sam2.set_point(points[0], 0)
masks = sam2.set_point(points[1], 1)
# masks = sam2.set_box(box)

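# Paint each mask region a random color on a near-black canvas, then blend 50/50 with the original image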
result = np.ones_like(image)
for mask in masks.values():
    result[mask > 0] = [np.random.randint(0, 256), np.random.randint(0, 256), np.random.randint(0, 256)]
result = cv2.addWeighted(image, 0.5, result, 0.5, 0)
for p in points:
    cv2.circle(result, (p[0], p[1]), 2, (255, 0, 0), -1)
if len(box) == 4:
    cv2.rectangle(result, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 1)
cv2.imwrite("result.jpg", result)
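
The tensor order assumed above (the encoder returning high_res_feats_0, high_res_feats_1, image_embed, and the decoder taking eight inputs ending with the original image size) has to match the exported ONNX files. A quick sanity check, as a minimal sketch against the model files linked at the end of this post:

import onnxruntime

sess = onnxruntime.InferenceSession("sam2.1_hiera_base_plus_encoder.onnx", providers=["CPUExecutionProvider"])
print([(t.name, t.shape) for t in sess.get_inputs()])   # expect a single image input, e.g. (1, 3, 1024, 1024)
print([(t.name, t.shape) for t in sess.get_outputs()])  # expect the two high-res feature maps first, the image embedding last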

tensorrt inference

The TensorRT version mirrors the onnxruntime classes: it deserializes pre-built engines, stages data through pinned host/device buffers, and runs inference via the NVIDIA sample helpers in common.py.
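
The .onnx models must first be serialized into TensorRT engines. A minimal build sketch using the TensorRT Python API, assuming a TensorRT 8.x install (trtexec --onnx=... --saveEngine=... from the TensorRT toolkit achieves the same from the command line):

import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
parser = trt.OnnxParser(network, logger)
with open("sam2.1_hiera_base_plus_encoder.onnx", "rb") as f:
    if not parser.parse(f.read()):
        raise RuntimeError(str(parser.get_error(0)))
config = builder.create_builder_config()
# config.set_flag(trt.BuilderFlag.FP16)  # optional, if the GPU supports it
engine_bytes = builder.build_serialized_network(network, config)
with open("sam2.1_hiera_base_plus_encoder.engine", "wb") as f:
    f.write(engine_bytes)
# Repeat with the decoder .onnx to produce the decoder .engine.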

sam2_trt.py

import cv2
import numpy as np
import tensorrt as trt
import common


logger = trt.Logger(trt.Logger.WARNING)

class SAM2ImageEncoder:
    def __init__(self, model_path) -> None:
        with open(model_path, "rb") as f, trt.Runtime(logger) as runtime:
            self.engine = runtime.deserialize_cuda_engine(f.read())
        self.context = self.engine.create_execution_context()
        self.inputs, self.outputs, self.bindings, self.stream = common.allocate_buffers(self.engine)
        self.input_height = 1024
        self.input_width = 1024
        self.input_shape = (1, 3, self.input_height, self.input_width)

    def encode_image(self, image):
        input_tensor = self.prepare_input(image)
        outputs = self.infer(input_tensor)
        return outputs[0], outputs[1], outputs[2]

    def prepare_input(self, image):
        input_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        input_img = cv2.resize(input_img, (self.input_width, self.input_height))
        input_img = (input_img - np.array([123.675, 116.28, 103.53])) / np.array([58.395, 57.12, 57.375])
        input_img = input_img.transpose(2, 0, 1)
        input_tensor = input_img[np.newaxis, :, :, :].astype(np.float32)
        return input_tensor

    def infer(self, input_tensor):
        np.copyto(self.inputs[0].host, input_tensor.ravel())
        output = common.do_inference(self.context, self.engine, self.bindings, self.inputs, self.outputs, self.stream)
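        # Host outputs come back as flat arrays; reshape them to the fixed output shapes of the
        # hiera_base_plus engine at 1024x1024 input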
        outputs = [output[0].reshape(1, 32, 256, 256), output[1].reshape(1, 64, 128, 128), output[2].reshape(1, 256, 64, 64)]
        return outputs


class SAM2ImageDecoder:
    def __init__(self, model_path, encoder_input_size, orig_im_size, mask_threshold = 0.0):
        with open(model_path, "rb") as f, trt.Runtime(logger) as runtime:
            self.engine = runtime.deserialize_cuda_engine(f.read())
        self.context = self.engine.create_execution_context()
        self.inputs, self.outputs, self.bindings, self.stream = common.allocate_buffers(self.engine)
        self.orig_im_size = orig_im_size if orig_im_size is not None else encoder_input_size
        self.encoder_input_size = encoder_input_size
        self.mask_threshold = mask_threshold
        self.scale_factor = 4

    def predict(self, image_embed, high_res_feats_0, high_res_feats_1, point_coords, point_labels):
        inputs = self.prepare_inputs(image_embed, high_res_feats_0, high_res_feats_1, point_coords, point_labels)
        outputs = self.infer(inputs)
        return self.process_output(outputs)

    def prepare_inputs(self, image_embed, high_res_feats_0, high_res_feats_1, point_coords, point_labels):
        input_point_coords, input_point_labels = self.prepare_points(point_coords, point_labels)
        num_labels = input_point_labels.shape[0]
        mask_input = np.zeros((num_labels, 1, self.encoder_input_size[0] // self.scale_factor, self.encoder_input_size[1] // self.scale_factor), dtype=np.float32)
        has_mask_input = np.array([0], dtype=np.float32)
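        # Unlike the ONNX decoder, this engine takes no original_size input; its fixed 256x256 mask
        # is resized back to the image size on the CPU afterwards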
        return image_embed, high_res_feats_0, high_res_feats_1, input_point_coords, input_point_labels, mask_input, has_mask_input

    def prepare_points(self, point_coords, point_labels):
        input_point_coords = point_coords[np.newaxis, ...]
        input_point_labels = point_labels[np.newaxis, ...]
        input_point_coords[..., 0] = input_point_coords[..., 0] / self.orig_im_size[1] * self.encoder_input_size[1]  # scale x to encoder input width
        input_point_coords[..., 1] = input_point_coords[..., 1] / self.orig_im_size[0] * self.encoder_input_size[0]  # scale y to encoder input height
        return input_point_coords.astype(np.float32), input_point_labels.astype(np.float32)

    def infer(self, inputs):  
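        # Host buffers are filled in the engine's I/O binding order, which must match the tuple
        # order returned by prepare_inputs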
        np.copyto(self.inputs[0].host, inputs[0].ravel())
        np.copyto(self.inputs[1].host, inputs[1].ravel())
        np.copyto(self.inputs[2].host, inputs[2].ravel())
        np.copyto(self.inputs[3].host, inputs[3].ravel())
        np.copyto(self.inputs[4].host, inputs[4].ravel())
        np.copyto(self.inputs[5].host, inputs[5].ravel())
        np.copyto(self.inputs[6].host, inputs[6].ravel())
        output = common.do_inference(self.context, self.engine, self.bindings, self.inputs, self.outputs, self.stream)
        outputs = [output[0].reshape(1, 1, 256, 256), output[1].reshape(1, 1)]
        return outputs

    def process_output(self, outputs):
        scores = outputs[1].squeeze()
        masks = outputs[0] > self.mask_threshold
        masks = masks.astype(np.uint8).squeeze()
        return masks, scores


class SAM2Image_TRT:
    def __init__(self, encoder_path, decoder_path):
        self.encoder = SAM2ImageEncoder(encoder_path)
        self.decoder_path = decoder_path
        self.decoder = {}
        self.point_coords = {}
        self.point_labels = {}
        self.box_coords = {}
        self.masks = {}

    def set_image(self, image):
        self.image_embeddings = self.encoder.encode_image(image)
        self.orig_im_size = (image.shape[0], image.shape[1])

    def set_point(self, point_coords, label_id):
        self.decoder[label_id] = SAM2ImageDecoder(self.decoder_path, self.encoder.input_shape[2:], self.orig_im_size)
        self.point_coords[label_id] = np.array([point_coords])
        self.point_labels[label_id] = np.array([1])
        return self.decode_mask(label_id)
    
    def set_box(self, box_coords):
        self.decoder[0] = SAM2ImageDecoder(self.decoder_path, self.encoder.input_shape[2:], self.orig_im_size)
        self.box_coords[0] = np.array([[box_coords[0], box_coords[1]], [box_coords[2], box_coords[3]]]) 
        return self.decode_mask(0)

    def decode_mask(self, label_id):
        concat_coords = []
        concat_labels = []
        if label_id in self.point_coords:
            concat_coords.append(self.point_coords[label_id])
            concat_labels.append(self.point_labels[label_id])
        if label_id in self.box_coords:
            concat_coords.append(self.box_coords[label_id])
            concat_labels.append(np.array([2, 3]))
        concat_coords = np.concatenate(concat_coords, axis=0)
        concat_labels = np.concatenate(concat_labels, axis=0)
        high_res_feats_0, high_res_feats_1, image_embed = self.image_embeddings
        mask, _ = self.decoder[label_id].predict(image_embed, high_res_feats_0, high_res_feats_1, concat_coords, concat_labels)
        mask = cv2.resize(mask, (self.orig_im_size[1], self.orig_im_size[0]), interpolation=cv2.INTER_LINEAR)
        self.masks[label_id] = mask
        return self.masks
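
Neither wrapper frees its pinned host and device buffers; for long-running processes a cleanup step can be added, e.g. this sketch using the free_buffers helper from common.py (release is a hypothetical method on SAM2ImageEncoder/SAM2ImageDecoder, not part of the code above):

    def release(self):
        # Hypothetical cleanup: frees host/device buffers and destroys the CUDA stream
        common.free_buffers(self.inputs, self.outputs, self.stream)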

common.py

#
# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse
import os
import ctypes
from typing import Optional, List

import numpy as np
import tensorrt as trt
from cuda import cuda, cudart

try:
    # FileNotFoundError does not exist on Python 2; fall back to IOError
    FileNotFoundError
except NameError:
    FileNotFoundError = IOError

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

def check_cuda_err(err):
    if isinstance(err, cuda.CUresult):
        if err != cuda.CUresult.CUDA_SUCCESS:
            raise RuntimeError("Cuda Error: {}".format(err))
    elif isinstance(err, cudart.cudaError_t):
        if err != cudart.cudaError_t.cudaSuccess:
            raise RuntimeError("Cuda Runtime Error: {}".format(err))
    else:
        raise RuntimeError("Unknown error type: {}".format(err))

def cuda_call(call):
    err, res = call[0], call[1:]
    check_cuda_err(err)
    if len(res) == 1:
        res = res[0]
    return res

def GiB(val):
    return val * 1 << 30


def add_help(description):
    parser = argparse.ArgumentParser(description=description, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    args, _ = parser.parse_known_args()


def find_sample_data(description="Runs a TensorRT Python sample", subfolder="", find_files=[], err_msg=""):
    """
    Parses sample arguments.

    Args:
        description (str): Description of the sample.
        subfolder (str): The subfolder containing data relevant to this sample
        find_files (List[str]): A list of filenames to find. Each filename will be replaced with an absolute path.

    Returns:
        str: Path of data directory.
    """

    # Standard command-line arguments for all samples.
    kDEFAULT_DATA_ROOT = os.path.join(os.sep, "usr", "src", "tensorrt", "data")
    parser = argparse.ArgumentParser(description=description, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "-d",
        "--datadir",
        help="Location of the TensorRT sample data directory, and any additional data directories.",
        action="append",
        default=[kDEFAULT_DATA_ROOT],
    )
    args, _ = parser.parse_known_args()

    def get_data_path(data_dir):
        # If the subfolder exists, append it to the path, otherwise use the provided path as-is.
        data_path = os.path.join(data_dir, subfolder)
        if not os.path.exists(data_path):
            if data_dir != kDEFAULT_DATA_ROOT:
                print("WARNING: " + data_path + " does not exist. Trying " + data_dir + " instead.")
            data_path = data_dir
        # Make sure data directory exists.
        if not (os.path.exists(data_path)) and data_dir != kDEFAULT_DATA_ROOT:
            print(
                "WARNING: {:} does not exist. Please provide the correct data path with the -d option.".format(
                    data_path
                )
            )
        return data_path

    data_paths = [get_data_path(data_dir) for data_dir in args.datadir]
    return data_paths, locate_files(data_paths, find_files, err_msg)


def locate_files(data_paths, filenames, err_msg=""):
    """
    Locates the specified files in the specified data directories.
    If a file exists in multiple data directories, the first directory is used.

    Args:
        data_paths (List[str]): The data directories.
        filenames (List[str]): The names of the files to find.

    Returns:
        List[str]: The absolute paths of the files.

    Raises:
        FileNotFoundError if a file could not be located.
    """
    found_files = [None] * len(filenames)
    for data_path in data_paths:
        # Find all requested files.
        for index, (found, filename) in enumerate(zip(found_files, filenames)):
            if not found:
                file_path = os.path.abspath(os.path.join(data_path, filename))
                if os.path.exists(file_path):
                    found_files[index] = file_path

    # Check that all files were found
    for f, filename in zip(found_files, filenames):
        if not f or not os.path.exists(f):
            raise FileNotFoundError(
                "Could not find {:}. Searched in data paths: {:}\n{:}".format(filename, data_paths, err_msg)
            )
    return found_files


class HostDeviceMem:
    """Pair of host and device memory, where the host memory is wrapped in a numpy array"""
    def __init__(self, size: int, dtype: np.dtype):
        nbytes = size * dtype.itemsize
        host_mem = cuda_call(cudart.cudaMallocHost(nbytes))
        pointer_type = ctypes.POINTER(np.ctypeslib.as_ctypes_type(dtype))

        self._host = np.ctypeslib.as_array(ctypes.cast(host_mem, pointer_type), (size,))
        self._device = cuda_call(cudart.cudaMalloc(nbytes))
        self._nbytes = nbytes

    @property
    def host(self) -> np.ndarray:
        return self._host

    @host.setter
    def host(self, arr: np.ndarray):
        if arr.size > self.host.size:
            raise ValueError(
                f"Tried to fit an array of size {arr.size} into host memory of size {self.host.size}"
            )
        np.copyto(self.host[:arr.size], arr.flat, casting='safe')

    @property
    def device(self) -> int:
        return self._device

    @property
    def nbytes(self) -> int:
        return self._nbytes

    def __str__(self):
        return f"Host:\n{self.host}\nDevice:\n{self.device}\nSize:\n{self.nbytes}\n"

    def __repr__(self):
        return self.__str__()

    def free(self):
        cuda_call(cudart.cudaFree(self.device))
        cuda_call(cudart.cudaFreeHost(self.host.ctypes.data))


# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
# If engine uses dynamic shapes, specify a profile to find the maximum input & output size.
def allocate_buffers(engine: trt.ICudaEngine, profile_idx: Optional[int] = None):
    inputs = []
    outputs = []
    bindings = []
    stream = cuda_call(cudart.cudaStreamCreate())
    tensor_names = [engine.get_tensor_name(i) for i in range(engine.num_io_tensors)]
    for binding in tensor_names:
        # get_tensor_profile_shape returns (min_shape, optimal_shape, max_shape)
        # Pick out the max shape to allocate enough memory for the binding.
        shape = engine.get_tensor_shape(binding) if profile_idx is None else engine.get_tensor_profile_shape(binding, profile_idx)[-1]
        shape_valid = np.all([s >= 0 for s in shape])
        if not shape_valid and profile_idx is None:
            raise ValueError(f"Binding {binding} has dynamic shape, " +\
                "but no profile was specified.")
        size = trt.volume(shape)
        if engine.has_implicit_batch_dimension:
            size *= engine.max_batch_size
        dtype = np.dtype(trt.nptype(engine.get_tensor_dtype(binding)))

        # Allocate host and device buffers
        bindingMemory = HostDeviceMem(size, dtype)

        # Append the device buffer to device bindings.
        bindings.append(int(bindingMemory.device))

        # Append to the appropriate list.
        if engine.get_tensor_mode(binding) == trt.TensorIOMode.INPUT:
            inputs.append(bindingMemory)
        else:
            outputs.append(bindingMemory)
    return inputs, outputs, bindings, stream


# Frees the resources allocated in allocate_buffers
def free_buffers(inputs: List[HostDeviceMem], outputs: List[HostDeviceMem], stream: cudart.cudaStream_t):
    for mem in inputs + outputs:
        mem.free()
    cuda_call(cudart.cudaStreamDestroy(stream))


# Wrapper for cudaMemcpy which infers copy size and does error checking
def memcpy_host_to_device(device_ptr: int, host_arr: np.ndarray):
    nbytes = host_arr.size * host_arr.itemsize
    cuda_call(cudart.cudaMemcpy(device_ptr, host_arr, nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice))


# Wrapper for cudaMemcpy which infers copy size and does error checking
def memcpy_device_to_host(host_arr: np.ndarray, device_ptr: int):
    nbytes = host_arr.size * host_arr.itemsize
    cuda_call(cudart.cudaMemcpy(host_arr, device_ptr, nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost))


def _do_inference_base(inputs, outputs, stream, execute_async):
    # Transfer input data to the GPU.
    kind = cudart.cudaMemcpyKind.cudaMemcpyHostToDevice
    [cuda_call(cudart.cudaMemcpyAsync(inp.device, inp.host, inp.nbytes, kind, stream)) for inp in inputs]
    # Run inference.
    execute_async()
    # Transfer predictions back from the GPU.
    kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost
    [cuda_call(cudart.cudaMemcpyAsync(out.host, out.device, out.nbytes, kind, stream)) for out in outputs]
    # Synchronize the stream
    cuda_call(cudart.cudaStreamSynchronize(stream))
    # Return only the host outputs.
    return [out.host for out in outputs]


def do_inference(context, engine, bindings, inputs, outputs, stream):
    def execute_async_func():
        context.execute_async_v3(stream_handle=stream)
    # Setup context tensor address.
    num_io = engine.num_io_tensors
    for i in range(num_io):
        context.set_tensor_address(engine.get_tensor_name(i), bindings[i])
    return _do_inference_base(inputs, outputs, stream, execute_async_func)

inference.py

import cv2
import numpy as np
from sam2_trt import SAM2Image_TRT


image = cv2.imread("dog.jpg")
sam2 = SAM2Image_TRT("sam2.1_hiera_base_plus_encoder.engine",  "sam2.1_hiera_base_plus_decoder.engine")
sam2.set_image(image)

points = [[200, 500], [600, 240]]
box = [0, 219, 417, 528] 
masks = sam2.set_point(points[0], 0)
masks = sam2.set_point(points[1], 1)
# masks = sam2.set_box(box)

result = np.ones_like(image)
for mask in masks.values():
    result[mask > 0] = [np.random.randint(0, 256), np.random.randint(0, 256), np.random.randint(0, 256)]
result = cv2.addWeighted(image, 0.5, result, 0.5, 0)
for p in points:
    cv2.circle(result, (p[0], p[1]), 2, (255, 0, 0), -1)
if len(box) == 4:
    cv2.rectangle(result, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 1)
cv2.imwrite("result.jpg", result)

Segmentation result:
[segmentation overlay image: result.jpg]

The model files are available on Baidu Netdisk:
https://pan.baidu.com/s/1tLM3uzUKoTaB2Fyxftr3GA?pwd=p6cw (extraction code: p6cw)
