Get File Path of Gallery Image

This will allow you to take a URI returned by the Gallery browser and convert it into a direct path to the file in the file system. I needed the exact path where a chosen image was stored for logging purposes. There may be better ways of doing this, so your input is welcome. Note that on modern Android versions apps often do not have direct file-system access to media files, so prefer working with the content URI itself where possible.



// To open up a gallery browser Intent intent = new Intent(); intent.setType("image/*"); intent.setAction(Intent.ACTION_GET_CONTENT); startActivityForResult(Intent.createChooser(intent, "Select Picture"),1); // To handle when an image is selected from the browser, add the following to your Activity @Override public void onActivityResult(int requestCode, int resultCode, Intent data) { if (resultCode == RESULT_OK) { if (requestCode == 1) { // currImageURI is the global variable I'm using to hold the content:// URI of the image currImageURI = data.getData(); } } } // And to convert the image URI to the direct file system path of the image file public String getRealPathFromURI(Uri contentUri) { // can post image String [] proj={MediaStore.Images.Media.DATA}; Cursor cursor = managedQuery( contentUri, proj, // Which columns to return null, // WHERE clause; which rows to return (all rows) null, // WHERE clause selection arguments (none) null); // Order-by clause (ascending by name) int column_index = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.DATA); cursor.moveToFirst(); return cursor.getString(column_index); }

def test_ae_awb_regions(self):
  """Test AE and AWB regions."""

  with its_session_utils.ItsSession(
      device_id=self.dut.serial,
      camera_id=self.camera_id,
      hidden_physical_id=self.hidden_physical_id) as cam:
    props = cam.get_camera_properties()
    props = cam.override_with_hidden_physical_camera_props(props)
    log_path = self.log_path
    test_name_with_log_path = os.path.join(log_path, _NAME)

    # Load chart for scene
    its_session_utils.load_scene(
        cam, props, self.scene, self.tablet, self.chart_distance,
        log_path)

    # Tap tablet to remove gallery buttons
    if self.tablet:
      self.tablet.adb.shell(
          f'input tap {_TAP_COORDINATES[0]} {_TAP_COORDINATES[1]}')

    # Check skip conditions
    max_ae_regions = props['android.control.maxRegionsAe']
    max_awb_regions = props['android.control.maxRegionsAwb']
    first_api_level = its_session_utils.get_first_api_level(self.dut.serial)
    camera_properties_utils.skip_unless(
        first_api_level >= its_session_utils.ANDROID15_API_LEVEL and
        camera_properties_utils.ae_regions(props) and
        (max_awb_regions >= _AE_AWB_REGIONS_AVAILABLE or
         max_ae_regions >= _AE_AWB_REGIONS_AVAILABLE))
    logging.debug('maximum AE regions: %d', max_ae_regions)
    logging.debug('maximum AWB regions: %d', max_awb_regions)

    # Find largest preview size to define capture size to find aruco markers
    preview_size = _get_largest_common_aspect_ratio_preview_size(
        cam, self.camera_id)
    width, height = (int(dim) for dim in preview_size.split('x'))
    req = capture_request_utils.auto_capture_request()
    fmt = {'format': 'yuv', 'width': width, 'height': height}
    cam.do_3a()
    cap = cam.do_capture(req, fmt)

    # Save image and convert to numpy array
    img = image_processing_utils.convert_capture_to_rgb_image(
        cap, props=props)
    img_path = f'{test_name_with_log_path}_aruco_markers.jpg'
    image_processing_utils.write_image(img, img_path)
    img = image_processing_utils.convert_image_to_uint8(img)

    # Define AE/AWB metering regions
    chart_path = f'{test_name_with_log_path}_chart_boundary.jpg'
    ae_awb_regions = _define_metering_regions(
        img, img_path, chart_path, props, width, height)

    # Do preview recording with pre-defined AE/AWB regions
    recording_obj = cam.do_preview_recording_with_dynamic_ae_awb_region(
        preview_size, ae_awb_regions, _REGION_DURATION_MS)
    logging.debug('Tested quality: %s', recording_obj['quality'])

    # Grab the video from the save location on DUT
    self.dut.adb.pull([recording_obj['recordedOutputPath'], log_path])
    file_name = recording_obj['recordedOutputPath'].split('/')[-1]
    file_name_with_path = os.path.join(log_path, file_name)
    logging.debug('file_name: %s', file_name)

    # Extract 8 key frames per 8 seconds of preview recording.
    # Each of the 4 regions (blue, light, dark, yellow) is metered for 2 s.
    # Unpack frames based on metering region's color; if testing a front
    # camera with a mirrored preview, the region order is reversed.
    # pylint: disable=unbalanced-tuple-unpacking
    front_facing = (props['android.lens.facing'] ==
                    camera_properties_utils.LENS_FACING['FRONT'])
    mirrored = (props['android.sensor.orientation'] in
                _MIRRORED_PREVIEW_SENSOR_ORIENTATIONS)
    if front_facing and mirrored:
      _, yellow, _, dark, _, light, _, blue = (
          _extract_and_process_key_frames_from_recording(
              log_path, file_name))
    else:
      _, blue, _, light, _, dark, _, yellow = (
          _extract_and_process_key_frames_from_recording(
              log_path, file_name))

    # AWB Check: verify R/B ratio change is greater than threshold
    if max_awb_regions >= _AE_AWB_REGIONS_AVAILABLE:
      _do_awb_check(blue, yellow)

    # AE Check: extract the Y component from rectangle patch
    if max_ae_regions >= _AE_AWB_REGIONS_AVAILABLE:
      _do_ae_check(light, dark, file_name_with_path)

if __name__ == '__main__':
  test_runner.main()
07-22
import javax.swing.*; import javax.swing.border.Border; import java.awt.*; import java.awt.event.MouseAdapter; import java.awt.event.MouseEvent; import java.awt.image.BufferedImage; import java.beans.PropertyChangeEvent; import java.beans.PropertyChangeListener; import java.io.File; import java.io.FileNotFoundException; import java.util.Arrays; public class PhotoGallery extends Component implements PropertyChangeListener { private final int THUMBNAIL_SIZE = 150; // 缩略图固定宽高 static JScrollPane scrollPane; public PhotoGallery(TreeSelectionModel model){ model.addPropertyChangeListener(this); // 注册监听 } @Override public void propertyChange(PropertyChangeEvent evt) { if ("selectedPath".equals(evt.getPropertyName())) { String newPath = (String) evt.getNewValue(); updatePreview(newPath); // 根据新路径更新预览 } } private void updatePreview(String path) { System.out.println("路径已更新: " + path); // 实际业务逻辑(如加载文件内容、显示缩略图等) init(path); } public void init(String filepath) { File folder = new File(filepath); if (!folder.exists() || !folder.isDirectory()) { System.out.println("指定的文件夹路径不正确或无法识别!"); return; } File[] images = folder.listFiles((dir, name) -> name.toLowerCase().endsWith(".jpg") || name.toLowerCase().endsWith(".png") || name.toLowerCase().endsWith(".jpeg") || name.toLowerCase().endsWith(".gif") || name.toLowerCase().endsWith("bmp")); if (images == null) { System.out.println("无法列出文件夹内的项目."); return; } JPanel imageContainer = new JPanel(); imageContainer.setLayout(new GridLayout(0, 4)); // 设定网格布局,一行四列 for (File file : Arrays.asList(images)) { BufferedImage img = ImageUtils.resizeImage(file.getAbsolutePath(), THUMBNAIL_SIZE, THUMBNAIL_SIZE); JLabel label = new JLabel(new ImageIcon(img)); label.setText(file.getName()); label.setFont(new Font("微软雅黑", Font.PLAIN, 16)); // 设置字体 label.setForeground(Color.BLACK); // 设置字体颜色 label.setHorizontalTextPosition(JLabel.CENTER); // 设置文字水平对齐方式为居中 label.setVerticalTextPosition(JLabel.BOTTOM); // 设置文字垂直对齐方式为底部 JPanel panel = new JPanel(); 
panel.add(label); Border border = BorderFactory.createEmptyBorder(2, 2, 2, 2); panel.setBorder(border); addMouseClickListener(panel, file.getAbsolutePath()); imageContainer.add(panel); } scrollPane = new JScrollPane(imageContainer, ScrollPaneConstants.VERTICAL_SCROLLBAR_AS_NEEDED, ScrollPaneConstants.HORIZONTAL_SCROLLBAR_NEVER); } private void addMouseClickListener(final JComponent component, String imagePath) { MouseAdapter mouseListener = new MouseAdapter() { @Override public void mouseClicked(MouseEvent e) { JOptionPane.showMessageDialog(component.getParent(), "你选择了:" + new File(imagePath).getName(), "图片详情", JOptionPane.PLAIN_MESSAGE); // 如果需要弹出大图预览窗体或者其他操作可以在下方补充相应逻辑 } @Override public void mouseEntered(MouseEvent evt) { Border highlightedBorder = BorderFactory.createLineBorder(Color.GRAY, 2, true); ((JPanel) evt.getSource()).setBorder(highlightedBorder); } @Override public void mouseExited(MouseEvent evt) { Border defaultBorder = BorderFactory.createEmptyBorder(2, 2, 2, 2); ((JPanel) evt.getSource()).setBorder(defaultBorder); } }; component.addMouseListener(mouseListener); } }这个类作为上述中的其他类
03-09
基于粒子群优化算法的p-Hub选址优化(Matlab代码实现)内容概要:本文介绍了基于粒子群优化算法(PSO)的p-Hub选址优化问题的研究与实现,重点利用Matlab进行算法编程和仿真。p-Hub选址是物流与交通网络中的关键问题,旨在通过确定最优的枢纽节点位置和非枢纽节点的分配方式,最小化网络总成本。文章详细阐述了粒子群算法的基本原理及其在解决组合优化问题中的适应性改进,结合p-Hub中转网络的特点构建数学模型,并通过Matlab代码实现算法流程,包括初始化、适应度计算、粒子更新与收敛判断等环节。同时可能涉及对算法参数设置、收敛性能及不同规模案例的仿真结果分析,以验证方法的有效性和鲁棒性。; 适合人群:具备一定Matlab编程基础和优化算法理论知识的高校研究生、科研人员及从事物流网络规划、交通系统设计等相关领域的工程技术人员。; 使用场景及目标:①解决物流、航空、通信等网络中的枢纽选址与路径优化问题;②学习并掌握粒子群算法在复杂组合优化问题中的建模与实现方法;③为相关科研项目或实际工程应用提供算法支持与代码参考。; 阅读建议:建议读者结合Matlab代码逐段理解算法实现逻辑,重点关注目标函数建模、粒子编码方式及约束处理策略,并尝试调整参数或拓展模型以加深对算法性能的理解。
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值