Including a Module with append_features

This article explores how Ruby's append_features module method can be used to extend a class's behavior. Concrete examples show how to define class-level methods from within a module and how to turn a module's instance methods into class methods.


module MyMod

  def MyMod.append_features(someClass)
    def someClass.modmeth
      puts "Module (class) method"
    end
    super   # This call is necessary!
  end

  def meth1
    puts "Method 1"
  end

end


class MyClass

  include MyMod

  def MyClass.classmeth
    puts "Class method"
  end

  def meth2
    puts "Method 2"
  end

end


x = MyClass.new

MyClass.classmeth   # Output: Class method
x.meth1             # Output: Method 1
MyClass.modmeth     # Output: Module (class) method
x.meth2             # Output: Method 2

This example is worth examining in detail. First, we should understand that append_features isn't just a hook that is called when an include happens; it actually does the work of the include operation. That's why the call to super is needed; without it, the rest of the module (in this case, meth1) wouldn't be included at all.
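To see why super matters, here is a minimal sketch (not from the original text; Broken and Victim are illustrative names): an append_features that omits super silently skips the include work.

module Broken

  def Broken.append_features(someClass)
    # Deliberately no call to super: the actual include never happens.
  end

  def meth1
    puts "Method 1"
  end

end

class Victim
  include Broken
end

p Victim.new.respond_to?(:meth1)   # => false; meth1 was never mixed in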

Also note that the body of append_features contains a method definition. This looks unusual, but it works because the inner definition creates a singleton method (a class-level method on someClass). An attempt to define an ordinary instance method in the same way produced a "nested method" error in the Ruby versions this example was written for.
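Singleton-method definitions of this kind work on any object, which is why the nested def above succeeds. A hedged one-off illustration:

s = "hello"

def s.shout   # a singleton method on this one string
  puts upcase + "!"
end

s.shout   # Output: HELLO!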

Conceivably, a module might want to determine who initiated a mixin. The append_features method can be used for this as well, because the including class is passed in as a parameter, as the sketch below shows.
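For instance, a sketch along these lines (Tracker, Alpha, and Beta are hypothetical names) records every class that mixes the module in:

module Tracker

  def Tracker.append_features(someClass)
    @includers ||= []
    @includers << someClass
    super   # still perform the normal include
  end

  def Tracker.includers
    @includers || []
  end

end

class Alpha; include Tracker; end
class Beta;  include Tracker; end

p Tracker.includers   # => [Alpha, Beta]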

It is also possible to mix in the instance methods of a module as class methods:

module MyMod

  def meth3
    puts "Module instance method meth3"
    puts "can become a class method."
  end

end


class MyClass

  class << self   # Here, self is MyClass
    include MyMod
  end

end


MyClass.meth3

# Output:
# Module instance method meth3
# can become a class method.

The extend method is useful here. This example simply becomes:

class MyClass
  extend MyMod
end
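This works because extend mixes a module's instance methods into the singleton class of its receiver; since the receiver here is MyClass itself, meth3 again shows up as a class method:

MyClass.meth3   # Output: Module instance method meth3
                #         can become a class method.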