The download_saveimage flow

This article traces, step by step, the path the Chromium browser takes from the user choosing "save image / save video / save link" in the context menu to the file actually being downloaded. The core components involved include ContextMenuHelper, DownloadController, and DownloadManagerImpl.


Y:\chromium_org\src\chrome\android\java\src\org\chromium\chrome\browser\contextmenu\ChromeContextMenuPopulator.java

 

public boolean onItemSelected(ContextMenuHelper helper, ContextMenuParams params, int itemId) {
    ……
    } else if (itemId == R.id.contextmenu_save_image) {
        ContextMenuUma.record(params, ContextMenuUma.ACTION_SAVE_IMAGE);
        if (mDelegate.startDownload(params.getSrcUrl(), false)) {
            helper.startContextMenuDownload(
                    false, mDelegate.isDataReductionProxyEnabledForURL(params.getSrcUrl()));
        }
    } else if (itemId == R.id.contextmenu_save_video) {
        ContextMenuUma.record(params, ContextMenuUma.ACTION_SAVE_VIDEO);
        if (mDelegate.startDownload(params.getSrcUrl(), false)) {
            helper.startContextMenuDownload(false, false);
        }
    } else if (itemId == R.id.contextmenu_save_link_as) {
        ContextMenuUma.record(params, ContextMenuUma.ACTION_SAVE_LINK);
        String url = params.getUnfilteredLinkUrl();
        if (mDelegate.startDownload(url, true)) {
            ContextMenuUma.recordSaveLinkTypes(url);
            helper.startContextMenuDownload(true, false);
        }
    }
    ……
}

 

 

Y:\chromium_org\src\chrome\android\java\src\org\chromium\chrome\browser\contextmenu\ContextMenuHelper.java

 

public void startContextMenuDownload(boolean isLink, boolean isDataReductionProxyEnabled) {

private native void nativeOnStartDownload(
        long nativeContextMenuHelper, boolean isLink, boolean isDataReductionProxyEnabled);

 

 

 

X:\SmartGit_trunk_2.0\chromium_2125\android_packages_apps_Browser_chromium\src\chrome\browser\ui\android\context_menu_helper.cc

 

void ContextMenuHelper::OnStartDownload(
    JNIEnv* env,
    const JavaParamRef<jobject>& obj,
    jboolean jis_link,
    jboolean jis_data_reduction_proxy_enabled)
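
On the native side the JNI handler simply forwards the context-menu parameters saved on the helper to the DownloadController. A minimal sketch of the body (paraphrased, not verbatim; the member names context_menu_params_ / web_contents_ and the singleton accessor are assumptions, and the data-reduction-proxy header handling is elided):

// Sketch of the body of ContextMenuHelper::OnStartDownload (simplified, not verbatim).
// context_menu_params_ and web_contents_ are assumed members holding the saved
// ContextMenuParams and the owning WebContents; the singleton accessor differs
// across branches (DownloadControllerBase::Get() vs. DownloadControllerAndroid::Get()).
std::string extra_headers;  // filled in when the data reduction proxy should let the request pass through
DownloadControllerBase::Get()->StartContextMenuDownload(
    context_menu_params_, web_contents_, jis_link, extra_headers);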

 

 

 

 

Y:\chromium_org\src\chrome\browser\android\download\download_controller.cc

void DownloadController::StartContextMenuDownload(
    const ContextMenuParams& params, WebContents* web_contents, bool is_link,
    const std::string& extra_headers)

 

void CreateContextMenuDownload(int render_process_id,
                               int render_view_id,
                               const content::ContextMenuParams& params,
                               bool is_link,
                               const std::string& extra_headers,
                               bool granted) {
  ……
  if (!is_link && extra_headers.empty())
    dl_params->set_prefer_cache(true);
  dl_params->set_prompt(false);
  dlm->DownloadUrl(std::move(dl_params));
  ……
}
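
For orientation, here is a hedged sketch of roughly what CreateContextMenuDownload does around the lines quoted above (paraphrased, not verbatim; referrer and extra-header handling is simplified, and ownership types differ between the branches cited in this article). web_contents is looked up from render_process_id / render_view_id, and dlm is the DownloadManager of its BrowserContext:

// Sketch only -- pick the URL to fetch, build DownloadUrlParameters for it,
// and hand the request to the DownloadManager.
const GURL& url = is_link ? params.link_url : params.src_url;
std::unique_ptr<content::DownloadUrlParameters> dl_params(
    content::DownloadUrlParameters::FromWebContents(web_contents, url));
dl_params->set_content_initiated(true);   // triggered by a gesture on page content
dl_params->set_referrer(
    content::Referrer(params.page_url, params.referrer_policy));
if (!is_link && extra_headers.empty())
  dl_params->set_prefer_cache(true);      // images/videos: prefer the bytes already in the cache
dl_params->set_prompt(false);             // no "save as" prompt on Android
dlm->DownloadUrl(std::move(dl_params));   // next stop: DownloadManagerImpl::DownloadUrl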

 

/chromium_new/src/content/browser/download/download_manager_impl.cc

void DownloadManagerImpl::DownloadUrl(
    std::unique_ptr<DownloadUrlParameters> params) {
  ……
  BrowserThread::PostTaskAndReplyWithResult(
      BrowserThread::IO, FROM_HERE,
      base::Bind(&BeginDownload, base::Passed(&params),
                 browser_context_->GetResourceContext(),
                 content::DownloadItem::kInvalidId, weak_factory_.GetWeakPtr()),
      base::Bind(&DownloadManagerImpl::AddUrlDownloader,
                 weak_factory_.GetWeakPtr()));
  ……
}

 

 

std::unique_ptr<UrlDownloader, BrowserThread::DeleteOnIOThread> BeginDownload(
    std::unique_ptr<DownloadUrlParameters> params,
    content::ResourceContext* resource_context,
    uint32_t download_id,
    base::WeakPtr<DownloadManagerImpl> download_manager) {
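
BeginDownload runs on the IO thread. A rough sketch of its core, kept to the signatures already listed in this article (the construction of the net::URLRequest and the UrlDownloader fallback for requests without a renderer are trimmed):

// Sketch: a net::URLRequest (url_request) is built for params->url() (construction
// elided), then handed to the ResourceDispatcherHost so it runs through the normal
// resource-loading path with a download handler attached.
ResourceDispatcherHostImpl::Get()->BeginDownload(
    std::move(url_request), params->referrer(),
    params->content_initiated(), resource_context,
    params->render_process_host_id(),
    params->render_view_host_routing_id(),
    params->render_frame_host_routing_id(),
    params->do_not_prompt_for_login());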



/chromium_new/src/content/browser/loader/resource_dispatcher_host_impl.cc

DownloadInterruptReason ResourceDispatcherHostImpl::BeginDownload(
    std::unique_ptr<net::URLRequest> request,
    const Referrer& referrer,
    bool is_content_initiated,
    ResourceContext* context,
    int render_process_id,
    int render_view_route_id,
    int render_frame_route_id,
    bool do_not_prompt_for_login)

 

ResourceDispatcherHostImpl::CreateResourceHandlerForDownload(
    net::URLRequest* request,
    bool is_content_initiated,
    bool must_download)

 

void ResourceDispatcherHostImpl::BeginRequestInternal(
    std::unique_ptr<net::URLRequest> request,
    std::unique_ptr<ResourceHandler> handler)
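
Inside ResourceDispatcherHostImpl::BeginDownload the request is given a download-specific handler and then pushed into the normal resource-loading machinery. A compact sketch built only from the two signatures above (paraphrased; throttle wrapping omitted):

// Sketch: wrap the URLRequest with a DownloadResourceHandler and start it
// like any other resource request.
std::unique_ptr<ResourceHandler> handler(
    CreateResourceHandlerForDownload(request.get(), is_content_initiated,
                                     true /* must_download */));
BeginRequestInternal(std::move(request), std::move(handler));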

 

 

 

 

 

 

//////////////////////////////////////////////////////////

/chromium_2125_2.0/src/content/browser/download/download_resource_handler.cc

bool DownloadResourceHandler::OnResponseStarted(
    ResourceResponse* response,
    bool* defer)

 

static void StartOnUIThread(
    scoped_ptr<DownloadCreateInfo> info,
    DownloadResourceHandler::DownloadTabInfo* tab_info,
    scoped_ptr<ByteStreamReader> stream,
    const DownloadUrlParameters::OnStartedCallback& started_cb)
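
When the response headers arrive on the IO thread, OnResponseStarted fills in a DownloadCreateInfo (URL chain, MIME type, content length, save info) plus a ByteStreamReader that will feed the body, and posts StartOnUIThread to the UI thread. The UI-thread half is essentially one forwarding call; a paraphrased sketch (the lookup of the DownloadManager for the originating BrowserContext is elided):

// Inside StartOnUIThread (simplified, not verbatim): find the DownloadManager of
// the originating BrowserContext (elided) and hand over the new download.
download_manager->StartDownload(info.Pass(), stream.Pass(), started_cb);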

 

/chromium_2125_2.0/src/content/browser/download/download_manager_impl.cc

void DownloadManagerImpl::StartDownload(
    scoped_ptr<DownloadCreateInfo> info,
    scoped_ptr<ByteStreamReader> stream,
    const DownloadUrlParameters::OnStartedCallback& on_started)

 

void DownloadManagerImpl::StartDownloadWithId(
    scoped_ptr<DownloadCreateInfo> info,
    scoped_ptr<ByteStreamReader> stream,
    const DownloadUrlParameters::OnStartedCallback& on_started,
    bool new_download,
    uint32 id) {
  ……
  base::FilePath default_download_directory;
  if (delegate_) {
    base::FilePath website_save_directory;  // Unused
    bool skip_dir_check = false;            // Unused
    delegate_->GetSaveDir(GetBrowserContext(), &website_save_directory,
                          &default_download_directory, &skip_dir_check);
  }
  ……
  download->Start(download_file.Pass(), req_handle.Pass());
  ……
}

 

/chromium_2125_2.0/src/content/browser/download/download_item_impl.cc

void DownloadItemImpl::Start(
    scoped_ptr<DownloadFile> file,
    scoped_ptr<DownloadRequestHandleInterface> req_handle)

void DownloadItemImpl::OnDownloadFileInitialized(
    DownloadInterruptReason result)

void DownloadItemImpl::OnDownloadTargetDetermined(
    const base::FilePath& target_path,
    TargetDisposition disposition,
    DownloadDangerType danger_type,
    const base::FilePath& intermediate_path)

void DownloadItemImpl::OnDownloadRenamedToIntermediateName(
    DownloadInterruptReason reason,
    const base::FilePath& full_path)
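
These four callbacks run in sequence: Start() attaches the request handle and initializes the DownloadFile on the download thread, OnDownloadFileInitialized() asks the delegate for the final target path, OnDownloadTargetDetermined() renames the file to its intermediate (.crdownload) name, and OnDownloadRenamedToIntermediateName() lets the body bytes start flowing into it. A paraphrased sketch of the middle hop (not verbatim; error handling trimmed):

// Sketch of DownloadItemImpl::OnDownloadFileInitialized (simplified).
void DownloadItemImpl::OnDownloadFileInitialized(DownloadInterruptReason result) {
  if (result != DOWNLOAD_INTERRUPT_REASON_NONE) {
    Interrupt(result);  // e.g. the temporary file could not be created
    return;
  }
  // delegate_ is the DownloadManagerImpl, which in turn defers to the embedder's
  // delegate (ChromeDownloadManagerDelegate, see below) to pick the target path.
  delegate_->DetermineDownloadTarget(
      this, base::Bind(&DownloadItemImpl::OnDownloadTargetDetermined,
                       weak_ptr_factory_.GetWeakPtr()));
}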

 

The delegate has to be supplied by the upper (Chrome) layer; on the Chrome side it is implemented in:

/chromium_2125_2.0/src/chrome/browser/download/chrome_download_manager_delegate.cc
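
For example, the GetSaveDir() hook called from StartDownloadWithId above is implemented there, roughly as follows (paraphrased sketch, not verbatim; the ChromeOS/Drive special case and the pref wiring are omitted):

// Sketch of ChromeDownloadManagerDelegate::GetSaveDir (simplified).
void ChromeDownloadManagerDelegate::GetSaveDir(content::BrowserContext* browser_context,
                                               base::FilePath* website_save_dir,
                                               base::FilePath* download_save_dir,
                                               bool* skip_dir_check) {
  DownloadPrefs* prefs = DownloadPrefs::FromBrowserContext(browser_context);
  *website_save_dir = prefs->SaveFilePath();   // directory for "save page as"
  *download_save_dir = prefs->DownloadPath();  // the user's default download directory
  *skip_dir_check = false;                     // only ChromeOS/Drive paths skip the check
}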

 
