Displaying images outside the public/images folder with image_tag

My project needed an image-upload feature, and after some effort I got it fully working.
The problem is that as the project gets used, the number of uploaded images keeps growing,
which would make public/images very large.
So I wondered whether the images should be stored somewhere else.
After a few small changes I got that working too,
but then I ran into a small problem.

image_tag would not display images outside the public/images folder.
So I ran a quick experiment;
the code looks roughly like this:

Inside the images folder:
rails: <%= image_tag '68.jpg', :size => "50x40" %>
html:  <img src="/images/68.jpg" width="50" height="40" alt="Error" />
===================================================================
Outside the images folder:
rails: <%= image_tag 'file:///home/auckland/demo/68.jpg', :size => "50x40" %>
html:  <img src="file:///home/auckland/demo/68.jpg" width="50" height="40" alt="Error" />


The file permissions on 68.jpg are identical in both folders.
The result: the image inside the images folder displays fine,
while the one outside does not.
Strangely, when I inspect the HTML with Firebug, Firebug can locate the image from that second address without any trouble.
I was completely baffled.

[b]My solution:
Inside public/images/, create a
symbolic (soft) link pointing to the folder where you want to store the images:
ln -s /original/file /new/link

The images then display normally,
with no changes to any Ruby code.[/b]
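To make that concrete, here is roughly what it looks like, assuming the external storage directory is /home/auckland/demo from the experiment above (the link name "uploads" is just my own choice):

cd /path/to/rails_app
# the link lives under public/images; its target is the external directory
ln -s /home/auckland/demo public/images/uploads

After that, <%= image_tag 'uploads/68.jpg' %> renders src="/images/uploads/68.jpg", and the web server follows the link to the real file (provided it is configured to follow symlinks, e.g. Apache's FollowSymLinks).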

But it still puzzles me: when the image sits somewhere else, Firebug can find it from the very same address, so why won't the page rendered with image_tag display it?