Former-commit-id: f2086e94dcaa6d6b2b3d743483ac733e16ffefa6
2025-05-26 19:16:51 +08:00
parent 34037812b8
commit 85413a8594
684 changed files with 0 additions and 2040 deletions

View File

@@ -1,4 +0,0 @@
Input
Output
Imgs
*.jpg

View File

@@ -1,23 +0,0 @@
# Computer-Vision-Based AI Titration Control Device
## Mlabs AI Titration 1.0
## **[Mools Network Technology (Dalian) Co., Ltd., MoolsNet](https://www.mools.net/)**
![Logo](组合logo.png?raw=true)
## This folder contains the automatic titration control code
**predictor_burette.py** is the burette version of the program
**predictor_Syringe_Pump.py** is the syringe pump version of the program (the peristaltic pump variant is driven by the same script)
**resnet34-1Net.pth** (and similar files) are the weight files the programs load, produced by the training script
**class_indices.json** records the class labels and must match the training script
**burette_clearair.py** is a simple motor-control script used to purge air bubbles from the burette
**burette_velocity.py** is a simple motor-control script used to adjust the valve opening while the main program runs, so that the titrant falls drop by drop
If you are not sure which serial port the programs should use, open the computer's Device Manager, look under Ports (COM & LPT) for the CH340 device and note its COM number, or follow the video tutorial
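The predictor scripts locate the controller's serial port through a small helper module, `Find_COM`, which is imported below but not shown in this diff. Here is a minimal sketch of what it might look like, assuming pyserial is installed; only the function names `list_ch340_ports` and `list_USB_ports` are confirmed by the calling code, and the description-matching strings are assumptions:

```python
# Find_COM.py -- assumed implementation sketch, not the repository's actual file
from serial.tools import list_ports

def list_ch340_ports():
    """Return device names of ports whose description mentions a CH340 adapter."""
    return [p.device for p in list_ports.comports() if "CH340" in (p.description or "")]

def list_USB_ports():
    """Return device names of ports that look like USB serial devices (assumed heuristic)."""
    return [p.device for p in list_ports.comports() if "USB" in (p.description or "")]
```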

View File

@@ -1,24 +0,0 @@
import serial
import time
port = "COM5"  # serial port name; change to match your setup
baudrate = 9600  # baud rate
ser = serial.Serial(port, baudrate)
data = b"q1h16d"  # set the 16 rpm mode, a valve angle slightly wider than the slow-drip mode
ser.write(data)
time.sleep(0.01)
data = b"q5h1d"  # run for 1 second
ser.write(data)
time.sleep(0.01)
data = b"q6h3d"  # counterclockwise
ser.write(data)
time.sleep(3.01)  # this wait determines how long the valve stays open
data = b"q6h2d"  # clockwise
ser.write(data)
ser.close()  # close the serial port
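These scripts drive the stepper controller with short ASCII commands of the form `q<register>h<value>d`: register 1 sets the speed in rpm (register 2 adds a fractional part), registers 4 and 5 set the run time in minutes and seconds, and register 6 starts the motion (3 = counterclockwise, 2 = clockwise, 6 = stop). This reading is inferred from the comments across the scripts in this diff, not from a controller manual, so treat it as an assumption. A small helper that expresses the pattern:

```python
import time
import serial

def send_cmd(ser, register, value, settle=0.01):
    """Send one q<register>h<value>d command and give the controller time to process it.

    The register meanings are inferred from the surrounding scripts; verify them
    against your controller's documentation before relying on this helper.
    """
    ser.write(f"q{register}h{value}d".encode())
    time.sleep(settle)

# Example: reproduce burette_clearair.py's open-wait-close sequence
if __name__ == "__main__":
    with serial.Serial("COM5", 9600) as ser:  # port name is an assumption; adjust as needed
        send_cmd(ser, 1, 16)   # speed: 16 rpm
        send_cmd(ser, 5, 1)    # run time: 1 second
        send_cmd(ser, 6, 3)    # start counterclockwise (open the valve)
        time.sleep(3.01)       # keep the valve open for ~3 seconds
        send_cmd(ser, 6, 2)    # start clockwise (close the valve)
```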

View File

@@ -1,35 +0,0 @@
import serial
import time
port = "COM5"  # serial port name; change to match your setup
baudrate = 9600  # baud rate; change to match your setup
ser = serial.Serial(port, baudrate)
# The sequence below handles the case where the valve opening is slightly too wide
data = b"q1h14d"
ser.write(data)
time.sleep(0.01)
data = b"q2h90d"
ser.write(data)
time.sleep(0.01)
# The two commands above set the counterclockwise rotation speed (14.9 rpm). Because the
# opening is slightly too wide, this is set a little slower than the original opening speed.
# Adjust it repeatedly until the titrant falls drop by drop.
data = b"q5h1d"  # run for 1 second
ser.write(data)
time.sleep(0.01)
data = b"q6h3d"  # counterclockwise
ser.write(data)
time.sleep(5.01)
# This wait determines how long the valve stays open; it must not be shorter than the
# 1 second the valve needs to turn open.
data = b"q1h15d"
ser.write(data)
time.sleep(0.01)
data = b"q2h0d"
ser.write(data)
time.sleep(0.01)
# Speed parameters for turning back; use the same speed as the main program's slow-drip
# speed so the forward and reverse angles match.
data = b"q6h2d"  # clockwise
ser.write(data)
ser.close()
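The net valve adjustment per run of this script comes from the small speed difference between the opening move (14.9 rpm) and the closing move (15.0 rpm), each running for 1 second. A quick check of that arithmetic, under the reading of the commands given above:

```python
# Opening move: 14.9 rpm for 1 s; closing move: 15.0 rpm for 1 s.
open_rev = 14.9 / 60 * 1    # ~0.2483 revolutions counterclockwise
close_rev = 15.0 / 60 * 1   # ~0.2500 revolutions clockwise
net_closing_deg = (close_rev - open_rev) * 360
print(f"net valve closing per run: {net_closing_deg:.2f} degrees")  # ~0.60
```

Each run therefore closes the valve by a fraction of a degree, which is why the script can be repeated until the drip rate is right.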

View File

@@ -1,4 +0,0 @@
{
"0": "orange",
"1": "yellow"
}
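`class_indices.json` maps each class index to its label name. A sketch of how such a file is typically produced from a torchvision `ImageFolder` dataset during training; the `data/train` path follows the layout described in the PyTorch README further down, and is otherwise an assumption:

```python
import json
from torchvision import datasets

# ImageFolder assigns indices to class folders alphabetically: {'orange': 0, 'yellow': 1}
train_dataset = datasets.ImageFolder("data/train")
idx_to_class = {str(v): k for k, v in train_dataset.class_to_idx.items()}
with open("class_indices.json", "w") as f:
    json.dump(idx_to_class, f, indent=4)
```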

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1ca6d267a1b151f02a2096b1e7fbcea8abc298b6feec224c200a8b9a81fc2fc8
size 107513

View File

@@ -1,198 +0,0 @@
import torch.nn as nn
import torch
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channel, out_channel, stride=1, downsample=None, **kwargs):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channel)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,
kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channel)
self.downsample = downsample
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""
注意原论文中在虚线残差结构的主分支上第一个1x1卷积层的步距是2第二个3x3卷积层步距是1。
但在pytorch官方实现过程中是第一个1x1卷积层的步距是1第二个3x3卷积层步距是2
这么做的好处是能够在top1上提升大概0.5%的准确率。
可参考Resnet v1.5 https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch
"""
expansion = 4
def __init__(self, in_channel, out_channel, stride=1, downsample=None,
groups=1, width_per_group=64):
super(Bottleneck, self).__init__()
width = int(out_channel * (width_per_group / 64.)) * groups
self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=width,
kernel_size=1, stride=1, bias=False) # squeeze channels
self.bn1 = nn.BatchNorm2d(width)
# -----------------------------------------
self.conv2 = nn.Conv2d(in_channels=width, out_channels=width, groups=groups,
kernel_size=3, stride=stride, bias=False, padding=1)
self.bn2 = nn.BatchNorm2d(width)
# -----------------------------------------
self.conv3 = nn.Conv2d(in_channels=width, out_channels=out_channel*self.expansion,
kernel_size=1, stride=1, bias=False) # unsqueeze channels
self.bn3 = nn.BatchNorm2d(out_channel*self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
blocks_num,
num_classes=1000,
include_top=True,
groups=1,
width_per_group=64):
super(ResNet, self).__init__()
self.include_top = include_top
self.in_channel = 64
self.groups = groups
self.width_per_group = width_per_group
self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2,
padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(self.in_channel)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, blocks_num[0])
self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2)
self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2)
self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2)
if self.include_top:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) # output size = (1, 1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
def _make_layer(self, block, channel, block_num, stride=1):
downsample = None
if stride != 1 or self.in_channel != channel * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.in_channel, channel * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(channel * block.expansion))
layers = []
layers.append(block(self.in_channel,
channel,
downsample=downsample,
stride=stride,
groups=self.groups,
width_per_group=self.width_per_group))
self.in_channel = channel * block.expansion
for _ in range(1, block_num):
layers.append(block(self.in_channel,
channel,
groups=self.groups,
width_per_group=self.width_per_group))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.include_top:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def resnet34(num_classes=1000, include_top=True):
# https://download.pytorch.org/models/resnet34-333f7ec4.pth
return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)
def resnet50(num_classes=1000, include_top=True):
# https://download.pytorch.org/models/resnet50-19c8e357.pth
return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)
def resnet101(num_classes=1000, include_top=True):
# https://download.pytorch.org/models/resnet101-5d3b4d8f.pth
return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, include_top=include_top)
def resnext50_32x4d(num_classes=1000, include_top=True):
# https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth
groups = 32
width_per_group = 4
return ResNet(Bottleneck, [3, 4, 6, 3],
num_classes=num_classes,
include_top=include_top,
groups=groups,
width_per_group=width_per_group)
def resnext101_32x8d(num_classes=1000, include_top=True):
# https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth
groups = 32
width_per_group = 8
return ResNet(Bottleneck, [3, 4, 23, 3],
num_classes=num_classes,
include_top=include_top,
groups=groups,
width_per_group=width_per_group)
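The control scripts instantiate this model for two-class color classification. A minimal usage sketch; the weight file name `resnet34-1Net.pth` comes from the README, and loading it assumes the checkpoint was saved from this exact architecture:

```python
import torch
from model import resnet34

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = resnet34(num_classes=2).to(device)  # two classes: orange / yellow
state = torch.load("./resnet34-1Net.pth", map_location=device)
model.load_state_dict(state)
model.eval()
with torch.no_grad():
    dummy = torch.randn(1, 3, 224, 224, device=device)  # one normalized 224x224 RGB image
    print(torch.softmax(model(dummy), dim=1))  # per-class probabilities
```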

View File

@@ -1,372 +0,0 @@
import torch
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch import nn
import matplotlib.pyplot as plt
import cv2
import time
import os
from model import resnet34
import json
import serial
from datetime import datetime
from scipy.optimize import curve_fit
import numpy as np
import re
import Find_COM
def get_picture(frame, typ=0, date=''):  # capture a photo
    # The frame has already been grabbed by the caller (cap.read())
    if frame is None:
        print(frame)
    # Name the image with the experiment date and a timestamp
    if typ:
        image_name = f'{date}{int(time.time())}.jpg'
    else:
        image_name = f'{date}PH{int(time.time())}.jpg'
    # Where the photo is stored
    filepath = "Input/" + image_name
    cv2.imwrite(filepath, frame)  # save the photo
    return image_name
def start_move_1(ser):  # draw in the stock solution
    # Note: the controller is switched to 20 ml syringe-pump mode here; because of the
    # lead screw, the speed set below is half the actual speed in ml/min.
    data = b"q1h12d"  # dose at 24 ml per minute
    ser.write(data)
    time.sleep(0.01)
    data = b"q4h0d"  # run for 0 minutes
    ser.write(data)
    time.sleep(0.01)
    data = b"q5h30d"  # plus 30 seconds
    ser.write(data)  # 12 ml drawn in total
    time.sleep(0.01)
    data = b"q6h3d"  # aspirate
    ser.write(data)
    time.sleep(30)  # wait for the aspiration to finish
    print('Aspiration complete')
    # ser.close()
def start_move_2(ser):  # slow-dosing routine
    data = b"q1h1d"  # integer part of the speed setting
    ser.write(data)
    time.sleep(0.01)
    data = b"q2h50d"  # fractional part: combined setting 1.5, about 3 ml/min (0.05 ml/s) actual
    ser.write(data)
    time.sleep(0.01)
    data = b"q4h30d"  # run for 30 minutes
    ser.write(data)
    time.sleep(0.01)
    data = b"q5h0d"  # plus 0 seconds
    ser.write(data)
    time.sleep(0.01)
    data = b"q6h2d"  # dispense
    ser.write(data)
    time.sleep(1)
    # Note: the valve is not turned back here; the pump stays in a state that adds
    # one drop every few seconds.
    # ser.close()
def start_move_4(ser):  # slow-dosing routine at 0.2 ml/s
    data = b"q1h6d"  # dose at 12 ml per minute, i.e. 0.2 ml per second
    ser.write(data)
    time.sleep(0.01)
    data = b"q2h0d"  # no fractional part
    ser.write(data)
    time.sleep(0.01)
    data = b"q4h30d"  # run for 30 minutes
    ser.write(data)
    time.sleep(0.01)
    data = b"q5h0d"  # plus 0 seconds
    ser.write(data)  # continuous dosing
    time.sleep(0.01)
    data = b"q6h2d"  # dispense
    ser.write(data)
    time.sleep(1)
    # Note: the valve is not turned back here; the pump stays in a state that adds
    # one drop every few seconds.
    # ser.close()
def start_move_3(ser):  # stop-dosing routine
    data = b"q6h6d"  # stop command
    ser.write(data)
    # Turn the valve back
    ser.close()
def read_number_new(filepath):
    from paddleocr import PaddleOCR, draw_ocr
    # Create an OCR instance configured for Chinese
    ocr = PaddleOCR(use_angle_cls=True, lang="ch")
    # Run OCR on the image
    img_path = filepath
    result = ocr.ocr(img_path, cls=True)
    print('-----------------------------------------')
    print(result)
    ans = []
    for line in result:
        if line:
            for line1 in line:
                # line1[-1] is a (text, confidence) pair; keep values that parse as numbers
                try:
                    ans.append(float(line1[-1][0]))
                except:
                    continue
    print(ans)
    if not ans:
        ans.append(10)  # fallback value when nothing numeric was recognized
    return ans
# Fitting function: a scaled hyperbolic tangent
def poly_func(x, a, b, c, d):
    return a * np.tanh(d * x + b) + c
def line_chart(date="1", volume_list=[], voltage_list=[], color_list=[]):
    x = volume_list
    y = voltage_list
    z = color_list
    fig, ax1 = plt.subplots()
    plt.title("titration curve")
    # First Y axis: the potential curve
    color = 'tab:red'
    ax1.set_xlabel('value')
    ax1.set_ylabel('voltage', color=color)
    ax1.plot(x, y, color=color, antialiased=True)
    ax1.tick_params(axis='y', labelcolor=color)
    # Second Y axis sharing the X axis: the color curve
    ax2 = ax1.twinx()
    color = 'tab:blue'
    ax2.set_ylabel('color', color=color)
    print(x, z)
    ax2.plot(x, z, color=color)
    ax2.tick_params(axis='y', labelcolor=color)
    ax2.set_yticks([0, 1])  # tick positions
    ax2.set_yticklabels(['yellow', 'orange'])  # tick labels
    ax2.spines['right'].set_position(('outward', 60))  # shift this Y axis outward
    try:
        # Initial parameter estimates
        popt, pcov = curve_fit(poly_func, x, y, p0=[max(y)*3/4, -max(x), max(y), 1.5])
        print("Best-fit parameters:", popt)
        # The inflection point of a*tanh(d*x + b) + c lies at x = -b/d
        print(f'Potentiometric endpoint: {-popt[1]/popt[3]:.3f}')
        x_d = np.arange(0, max(x), 0.05)
        y_fit = poly_func(x_d, *popt)
        # First derivative (potential with respect to volume)
        dE_dV = np.gradient(y_fit)
        # Second derivative
        d2E_dV2 = np.gradient(dE_dV)
        y2 = d2E_dV2.tolist()
        # Third Y axis sharing the X axis: the second derivative
        ax3 = ax1.twinx()
        color = 'tab:green'
        ax3.set_ylabel('2nd Derivative', color=color)
        ax3.plot(x_d, y2, color=color)
        ax3.tick_params(axis='y', labelcolor=color)
        ax3.grid(True, linestyle='--', linewidth=0.5, color='gray', axis='both')
        # Mark the potentiometric endpoint
        x_d, y_d = -popt[1]/popt[3], 0.0
        ax3.plot(x_d, y_d, 'ro')  # red circle
        ax3.annotate(f'({x_d:.2f})',  # label text showing the coordinate
                     xy=(x_d, y_d),  # the point being annotated
                     color='red',
                     xytext=(x_d - 1, y_d + max(y2) / 10)  # offset the label slightly from the point
                     )
        # Mark the visual endpoint (first frame classified as orange)
        xz = z.index(1) if 1 in z else len(z) - 1
        x_c, y_c = x[xz] - 0.025, 0.0
        ax3.plot(x_c, y_c, 'bo')  # blue circle
        ax3.annotate(f'({x_c:.2f})',
                     xy=(x_c, y_c),
                     color='blue',
                     xytext=(x_c - 1, y_c - max(y2) / 10)
                     )
        print(f"Visual endpoint: {x_c:.3f}")
    except Exception as e:
        print(e)
    fig.tight_layout()  # adjust subplot parameters to fill the figure area
    plt.savefig(f'Output/{date}.png')
    plt.show()
    plt.pause(1)
    plt.close()
def predictor(im_file, device):
    # Open the image with PIL
    image = Image.open(im_file)
    # Define the image preprocessing pipeline
    data_transform = transforms.Compose(
        [
            # Resize the image to 256x256
            transforms.Resize(256),
            # Crop a 224x224 patch from the center
            transforms.CenterCrop(224),
            # Convert the image to a PyTorch tensor
            transforms.ToTensor(),
            # Normalize with the ImageNet mean and standard deviation
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    # [N, C, H, W]
    img = data_transform(image)
    # expand batch dimension
    img = torch.unsqueeze(img, dim=0)
    # Path to the class-label file
    json_path = './class_indices.json'
    with open(json_path, "r") as f:
        class_indict = json.load(f)
    # create model
    model = resnet34(num_classes=2).to(device)  # adjust to the number of classes
    # load model weights
    weights_path = "./resnet34-1Net.pth"  # adjust to the model file you actually use
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(weights_path)
    model.load_state_dict(torch.load(weights_path, map_location=device))
    # prediction
    model.eval()
    with torch.no_grad():
        # predict class
        output = torch.squeeze(model(img.to(device))).cpu()
        # Softmax turns the raw outputs into per-class probabilities
        predict = torch.softmax(output, dim=0)
        # Index of the most probable class
        predict_cla = torch.argmax(predict).numpy()
    # Look up the class name from the index
    class_a = "{}".format(class_indict[str(predict_cla)])
    # Format the probability to three significant digits
    prob_a = "{:.3}".format(predict[predict_cla].numpy())
    # Convert the probability to a float
    prob_b = float(prob_a)
    # Print the predicted class and probability
    print(class_a)
    print(prob_b)
    return class_a, prob_b
def voltage(ser):
    ser.write("VOL|\n".encode())  # ask the meter for the current potential
    time.sleep(0.1)  # wait for the device to respond
    while True:
        # Read the response
        response = ser.readline().decode().strip()
        if response:
            try:
                return float(response)
            except:
                pass
def main():
    # port = "COM11"  # serial port name; change to match your setup
    port = Find_COM.list_ch340_ports()[0]  # serial port name, found automatically
    baudrate = 9600  # baud rate; change to match your setup
    pump_ser = serial.Serial(port, baudrate)
    port_USB = Find_COM.list_USB_ports()  # serial port of the potential meter, if present
    if port_USB:
        USB_ser = serial.Serial(port_USB[0], baudrate=115200, timeout=1)
    videoSourceIndex = 0  # camera index; adjust for your setup
    cap = cv2.VideoCapture(videoSourceIndex, cv2.CAP_DSHOW)  # open the camera
    # Use the GPU if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Before the loop starts, variables are needed to record the running state
    total_volume = 0
    now_volume = 0
    volume_list = []
    voltage_list = []
    color_list = []
    start_time = time.time()
    # Convert the timestamp to a datetime object
    dt_object = datetime.fromtimestamp(start_time)
    # Format it as a string, used in the saved image names
    formatted_time = dt_object.strftime('%Y%m%d_%H%M%S')
    print("Experiment started at", formatted_time)
    n = 10
    total_n = n
    start_move_2(pump_ser)
    while True:
        total_volume += 1
        volume_list.append(total_volume)
        # Capture an image
        ret, frame = cap.read()
        name = get_picture(frame, 0, formatted_time)
        # Full path of the image
        im_file = 'Input/' + name
        cv2.imshow('Color', frame)
        cv2.waitKey(1)
        class_a, prob_b = predictor(im_file, device)
        if port_USB:
            voltage_list.append(voltage(USB_ser))
        if class_a == "orange" and prob_b > 0.5:  # endpoint detected
            start_move_3(pump_ser)
            print('----->>Visual Endpoint<<-----')
            print(volume_list[-1])
            print(im_file)
            color_list.append(1)
            break
        color_list.append(0)
        print(total_volume)
    print(volume_list)
    print(voltage_list)
    print(color_list)
    with open(f'Output/{formatted_time}.json', 'w') as f:
        # Save the recorded series with json.dump()
        json.dump({"volume_list": volume_list, 'voltage_list': voltage_list, 'color_list': color_list}, f)
    # Close the serial ports
    pump_ser.close()
    if port_USB:
        USB_ser.close()
    line_chart(formatted_time, volume_list=volume_list, voltage_list=voltage_list, color_list=color_list)
if __name__ == "__main__":
    import warnings
    # Suppress all warnings
    warnings.filterwarnings('ignore')
    main()
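The endpoint detection in `line_chart` fits E(V) = a·tanh(dV + b) + c to the potential-volume data; the inflection point of that curve, V = -b/d, is taken as the potentiometric endpoint. A self-contained sketch of that fit on synthetic data (all numbers below are made up for illustration):

```python
import numpy as np
from scipy.optimize import curve_fit

def poly_func(x, a, b, c, d):
    return a * np.tanh(d * x + b) + c

# Synthetic titration curve: endpoint near V = 10 ml, with a little noise
rng = np.random.default_rng(0)
volume = np.arange(0.0, 20.0, 0.5)
potential = poly_func(volume, 200.0, -15.0, 400.0, 1.5) + rng.normal(0, 2, volume.size)

popt, _ = curve_fit(poly_func, volume, potential,
                    p0=[max(potential) * 3 / 4, -max(volume), max(potential), 1.5])
a, b, c, d = popt
print(f"Fitted endpoint V = -b/d = {-b / d:.2f} ml")  # ~10.0
```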

View File

@@ -1,170 +0,0 @@
import torch
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch import nn
import matplotlib.pyplot as plt
import cv2
import time
import os
from model import resnet34
import json
import serial
import Find_COM
def get_picture(cap):  # capture a photo
    # Grab one frame of data
    ret, frame = cap.read()
    if frame is None:
        print(frame)
    if ret:
        # Show the frame (non-blocking by default)
        cv2.imshow("picture", frame)
        cv2.waitKey(1)
    # Name the image with a timestamp
    image_name = str(int(time.time())) + ".jpg"
    # Where the photo is stored
    filepath = "Input/" + image_name
    cv2.imwrite(filepath, frame)  # save the photo
    return image_name
def start_move_1(port, baudrate):  # fast acid-dosing routine
    ser = serial.Serial(port, baudrate)
    data = b"q1h15d"  # 15 rpm, a rotation speed slightly above the slow-drip speed
    ser.write(data)
    time.sleep(0.01)
    data = b"q5h1d"  # run for 1 second
    ser.write(data)
    time.sleep(0.01)
    data = b"q6h3d"  # counterclockwise
    ser.write(data)
    time.sleep(20)  # 20 seconds of fast dripping; must not be shorter than the 1 second the valve needs to open
    data = b"q6h2d"  # clockwise
    ser.write(data)
    time.sleep(1)  # time to turn the valve back
    ser.close()
def start_move_2(port, baudrate):  # slow acid-dosing routine
    ser = serial.Serial(port, baudrate)
    data = b"q1h14d"  # 14 rpm, i.e. 14/60 = 0.233 revolutions per second; adjust as needed
    ser.write(data)
    time.sleep(0.01)
    # Note: the fractional part of the speed can also be set, as in the lines below
    # data = b"q2h50d"  # combined with the q1 command: 14.5 rpm; adjust as needed
    # ser.write(data)
    # time.sleep(0.01)
    data = b"q5h1d"  # run for 1 second
    ser.write(data)
    time.sleep(0.01)
    data = b"q6h3d"  # counterclockwise
    ser.write(data)
    time.sleep(1)  # time to turn the valve
    # Note: the valve is not turned back here; it stays in a state that adds one drop every few seconds
    ser.close()
def start_move_3(port, baudrate):  # stop acid-dosing routine
    ser = serial.Serial(port, baudrate)
    data = b"q1h14d"  # 14 rpm; must match the speed used in start_move_2
    ser.write(data)
    time.sleep(0.01)
    data = b"q5h1d"  # run for 1 second
    ser.write(data)
    time.sleep(0.01)
    data = b"q6h2d"  # clockwise
    ser.write(data)
    time.sleep(1)  # time to turn the valve
    # Turn the valve back
    ser.close()
def main():
    # port = "COM6"  # serial port name; change to match your setup
    port = Find_COM.list_ch340_ports()[0]  # serial port name, found automatically
    baudrate = 9600  # baud rate; change to match your setup
    # # Fast-dosing stage; tune it to your own required volume
    # start_move_1(port, baudrate)
    # time.sleep(15)
    videoSourceIndex = 0  # camera index; adjust for your setup
    cap = cv2.VideoCapture(videoSourceIndex, cv2.CAP_DSHOW)  # open the camera
    # Use the GPU if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Define the image preprocessing pipeline (it never changes, so build it once)
    data_transform = transforms.Compose(
        [
            # Resize the image to 256x256
            transforms.Resize(256),
            # Crop a 224x224 patch from the center
            transforms.CenterCrop(224),
            # Convert the image to a PyTorch tensor
            transforms.ToTensor(),
            # Normalize with the ImageNet mean and standard deviation
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    # Load the class labels and model weights once, before the loop
    json_path = './class_indices.json'
    with open(json_path, "r") as f:
        class_indict = json.load(f)
    # create model
    model = resnet34(num_classes=2).to(device)
    # load model weights
    weights_path = "./resnet34-1Net.pth"
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(weights_path)
    model.load_state_dict(torch.load(weights_path, map_location=device, weights_only=True))
    model.eval()
    start_move_2(port, baudrate)  # start the slow-drip state
    while True:
        # Capture an image
        name = get_picture(cap)
        # Full path of the image
        im_file = 'Input/' + name
        # Open the image with PIL
        image = Image.open(im_file)
        # [N, C, H, W]
        img = data_transform(image)
        # expand batch dimension
        img = torch.unsqueeze(img, dim=0)
        with torch.no_grad():
            # predict class
            output = torch.squeeze(model(img.to(device))).cpu()
            # Softmax turns the raw outputs into per-class probabilities
            predict = torch.softmax(output, dim=0)
            # Index of the most probable class
            predict_cla = torch.argmax(predict).numpy()
        # Look up the class name from the index
        class_a = "{}".format(class_indict[str(predict_cla)])
        # Probability formatted to three significant digits, then converted to float
        prob_b = float("{:.3}".format(predict[predict_cla].numpy()))
        # Print the predicted class and probability
        print(class_a)
        print(prob_b)
        if class_a == "orange" and prob_b >= 0.5:  # titration endpoint reached
            # Close the valve
            start_move_3(port, baudrate)
            print('----->>End<<-----')
            print(im_file)
            time.sleep(1)
            # Release the camera
            cap.release()
            # Close all OpenCV windows
            cv2.destroyAllWindows()
            break
        time.sleep(1)  # interval between photos
if __name__ == "__main__":
    main()

View File

@@ -1 +0,0 @@
0ad4c44919ed3fa3ac4542f40e71fcb84b1d6bb8

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eded1e9cf905cceb40b6eadf6ec9979852f4ddaf2d1d470cf681f1fbf0d3a03a
size 85279422

View File

@@ -1 +0,0 @@
data

View File

@@ -1 +0,0 @@
307ac081f349fd7d9203ab27ec6b3dcb5546aca0

View File

@@ -1,10 +0,0 @@
## This folder contains the PyTorch implementation
**model.py** is the model definition file
**train.py** trains the model
**predict.py** runs prediction with the trained model
**class_indices.json** is the label file matching the training dataset
The **data** folder is split into train and val parts, used for training and validation respectively; just place the classified photos into the "orange" or "yellow" subfolders (see the sketch below)
Note: the folders currently hold only a few example images, so training on them directly will not give good results; after all, it would not do for me to finish all the work, since then how would you show your own contribution, right?
After training, remember to copy the generated weight file into the control program
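A sketch of the expected data layout and a minimal loading call, assuming `train.py` follows the usual torchvision `ImageFolder` pattern (train.py itself is not shown in this diff, so this is an assumed outline, not its actual contents):

```python
# Expected layout (from the README above):
# data/
#   train/
#     orange/  *.jpg
#     yellow/  *.jpg
#   val/
#     orange/  *.jpg
#     yellow/  *.jpg
from torchvision import datasets, transforms

train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
train_set = datasets.ImageFolder("data/train", transform=train_transform)
print(train_set.class_to_idx)  # should match class_indices.json: {'orange': 0, 'yellow': 1}
```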

View File

@@ -1,4 +0,0 @@
{
"0": "orange",
"1": "yellow"
}

View File

@@ -1 +0,0 @@
proc*
