lib_chan 简化版

lib_chan 用起来比较繁琐，用法也不够清晰。下面这个版本对它做了大幅简化——实现上与原版完全不同，只是中间人（middleman）的协议保持类似，这样用起来更舒服一些。


-module(mm).
-compile(export_all).

%% Start the client-side middleman: spawn a process that connects to
%% Address:Port and relays terms between the socket and Pid.
%% Returns the middleman's pid.
client_start(Address, Port, Pid) ->
    Connector = fun() -> connect(Address, Port, Pid) end,
    spawn(Connector).

%% Open a TCP connection to Address:Port and enter the middleman loop.
%% Options: binary mode, 4-byte length-prefixed packets (matches the
%% listener options in start_parallel_server/2).
%% A failed connect crashes this process via badmatch — deliberate
%% let-it-crash style; the caller/supervisor is expected to handle it.
connect(Address,Port,Pid) ->
{ok,Socket}=gen_tcp:connect(Address,Port,[binary,{packet,4}]),
loop(Socket,Pid).

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%% Start the server side: spawn the listener process and return ok
%% immediately.  Pid is the process that will own all middlemen.
server_start(Port, Pid) ->
    _Listener = spawn(fun() -> start_parallel_server(Port, Pid) end),
    ok.

%% Open the listen socket, start the first acceptor, then block forever
%% so the listen socket stays open for the node's lifetime.
start_parallel_server(Port,Pid) ->
link(Pid), %% when Pid dies, this listener must die too
{ok,Listen}=gen_tcp:listen(Port,[binary,{reuseaddr,true},{packet,4},{active,true}]),
%% First acceptor; each acceptor spawns its own successor on accept.
spawn(fun()->par_connect(Listen,Pid) end),
%% Park this process forever — it exists only to keep Listen alive
%% (and to take Pid down with it via the link above).
receive
after infinity ->
true
end.

%% Accept one connection, hand accepting off to a fresh process, then
%% become the middleman for the accepted socket.
%%
%% Fix: the original matched only {error,closed}.  Any other accept
%% error (e.g. emfile on fd exhaustion) crashed this process *before*
%% a replacement acceptor was spawned, silently stopping the server
%% from ever accepting again.  Match any error and log the reason.
par_connect(Listen,Pid) ->
case gen_tcp:accept(Listen) of
{ok,Socket} ->
spawn(fun()->par_connect(Listen,Pid) end),%% next acceptor; this process now handles Socket
link(Pid), %% when Pid dies, this middleman must die too
loop(Socket,Pid);
{error,Reason} ->
io:format("accept on listen socket failed: ~p~n",[Reason])
end.

%% Middleman event loop: shuttle Erlang terms between Socket and Pid.
%% Inbound packets are decoded with binary_to_term/1 and delivered as
%% {mm, self(), Term}; a {send, Term} request is encoded and written to
%% the socket.  The loop ends on tcp_closed (Pid gets {mm_closed, self()})
%% or on an explicit 'close' request (the peer then sees tcp_closed).
loop(Socket, Pid) ->
    receive
        {send, Payload} ->
            gen_tcp:send(Socket, term_to_binary(Payload)),
            loop(Socket, Pid);
        {tcp, Socket, Packet} ->
            Pid ! {mm, self(), binary_to_term(Packet)},
            loop(Socket, Pid);
        {tcp_closed, Socket} ->
            Pid ! {mm_closed, self()};
        close ->
            gen_tcp:close(Socket)
    end.
# NOTE(review): this script arrived mashed onto a single line; it has been
# reformatted and two concrete bugs fixed (marked inline).  Per the author's
# note ("改动不涉及fpn_output"), the FPN path itself is left untouched.
import os

import numpy as np
import torch
import torch.nn as nn
from skimage.segmentation import chan_vese
from torch.nn import Conv2d
from torchvision.models import resnet50
from torchvision.ops import FeaturePyramidNetwork

os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'


def process_conv_output(conv_output):
    """Split a (64, 16, 3, 3) tensor into four (16, 16, 3, 3) chunks along dim 0.

    NOTE(review): the hard-coded leading 64 means the upstream conv stack is
    assumed to see a batch of exactly 64 images — confirm with the caller.
    """
    assert conv_output.shape == (64, 16, 3, 3), "Input tensor shape is not correct!"
    chunks = torch.chunk(conv_output, chunks=4, dim=0)
    for idx, chunk in enumerate(chunks):
        print(f"Chunk {idx} Shape: ", chunk.shape)
        assert chunk.shape == (16, 16, 3, 3)
    return chunks


class ImageSegmentationModel(nn.Module):
    """Segmentation net: Chan-Vese pre-segmentation feeds a small conv stack
    whose output is used as a dynamic convolution kernel on top of a
    ResNet-50 + FPN feature pyramid."""

    def __init__(self):
        super(ImageSegmentationModel, self).__init__()
        # Stack that turns the 1-channel Chan-Vese mask into a (B,16,3,3)
        # tensor used as a dynamic conv weight in forward().
        self.conv_layers = nn.Sequential(
            nn.Conv2d(1, 128, kernel_size=3, stride=2),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(),
            nn.Conv2d(128, 64, kernel_size=3, stride=2),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 32, kernel_size=3, stride=2),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(),
            nn.Conv2d(32, 16, kernel_size=3, stride=2),
        )
        self.resnet = resnet50(pretrained=True)
        self.initial_layer = nn.Sequential(
            self.resnet.conv1,    # 64 output channels
            self.resnet.bn1,
            self.resnet.relu,
            self.resnet.maxpool,  # 64 output channels
        )
        self.layer1 = self.resnet.layer1
        self.layer2 = self.resnet.layer2
        self.layer3 = self.resnet.layer3
        self.layer4 = self.resnet.layer4
        self.fpn = FeaturePyramidNetwork([256, 512, 1024, 2048], 256)
        self.conv_layers11 = nn.Conv2d(256, 1, kernel_size=1, stride=1)
        self.final_conv = nn.Conv2d(16, 21, kernel_size=1, stride=1)
        self.softmax = nn.Softmax(dim=1)

    def preprocess(self, x):
        """Gray-scale the batch, run Chan-Vese per image, return (B,1,511,511)
        float masks on x's device."""
        x = torch.nn.functional.interpolate(
            x, size=(511, 511), mode='bilinear', align_corners=False)
        x = torch.mean(x, dim=1, keepdim=True)
        x_np = x.detach().cpu().numpy()
        segmented = []
        for i in range(x_np.shape[0]):
            img = x_np[i, 0]
            # BUG FIX: skimage's chan_vese returns a boolean segmentation
            # *mask*, not a contour.  The original code indexed the result
            # like an active_contour snake (snake[:, 1]) and rasterized a
            # polygon from it, which is meaningless for a 2-D mask (and
            # raises on a bool array).  Use the mask directly.
            seg = chan_vese(img, mu=0.25, lambda1=1.0, lambda2=1.0,
                            tol=0.001, max_num_iter=500, dt=0.5)
            segmented.append(seg.astype(np.float32))
        segmented = np.array(segmented)
        segmented = torch.from_numpy(segmented).unsqueeze(1).float().to(x.device)
        return segmented

    def forward(self, x):
        y = torch.nn.functional.interpolate(
            x, size=(511, 511), mode='bilinear', align_corners=False)
        x = self.preprocess(x)
        # (B,1,511,511) -> (B,16,3,3); process_conv_output asserts B == 64.
        conv_output = self.conv_layers(x)
        conv_output_tuple = process_conv_output(conv_output)
        print("conv_output:", conv_output.shape)
        z = self.initial_layer(y)
        c1 = self.layer1(z)   # [batch, 256, H, W]
        c2 = self.layer2(c1)  # [batch, 512, H, W]
        c3 = self.layer3(c2)  # [batch, 1024, H, W]
        c4 = self.layer4(c3)  # [batch, 2048, H, W]
        fpn_input = {
            'feat1': c1,
            'feat2': c2,
            'feat3': c3,
            'feat4': c4,
        }
        fpn_output = self.fpn(fpn_input)
        fpn_output_upsampled = torch.nn.functional.interpolate(
            fpn_output['feat1'], size=(511, 511), mode='bilinear',
            align_corners=False)
        # BUG FIX: conv2d's `weight` must be a single tensor; the original
        # passed the whole tuple of chunks, which raises a TypeError.  Using
        # one (16,16,3,3) chunk also makes the channel math consistent:
        # 256 in-channels / groups=16 = 16 per group, 16 out-channels, which
        # is exactly what final_conv (16 -> 21) expects.
        final_output = nn.functional.conv2d(
            fpn_output_upsampled, conv_output_tuple[0],
            stride=1, padding=1, groups=16)
        final_output = self.final_conv(final_output)
        final_output = self.softmax(final_output)
        return final_output
03-20
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值