Former-commit-id: 43bc977a970a5bba09d0afa6f2a85169fe1ed253
2025-05-15 23:49:13 +08:00
commit 2c2822ff11
720 changed files with 2735 additions and 0 deletions

.gitattributes vendored Normal file

@@ -0,0 +1,2 @@
# Auto detect text files and perform LF normalization
* text=auto

.gitignore vendored Normal file

@@ -0,0 +1,39 @@
Auto_Ctrl/Output/20250318_171231.png
Auto_Ctrl/Output/20250318_171231.json
Auto_Ctrl/Output/20250318_171130.json
Auto_Ctrl/Output/20250318_170605.json
Auto_Ctrl/Output/20250318_170313.json
Auto_Ctrl/Output/20250318_164834.json
Auto_Ctrl/Output/20250318_164605.json
Auto_Ctrl/Output/20250318_164343.json
Auto_Ctrl/Output/20250318_163705.json
Auto_Ctrl/Output/20250318_163420.json
Auto_Ctrl/Input/20250318_171231PH1742289186.jpg
Auto_Ctrl/Input/20250318_171231PH1742289183.jpg
Auto_Ctrl/Input/20250318_171130PH1742289124.jpg
Auto_Ctrl/Input/20250318_171130PH1742289122.jpg
Auto_Ctrl/Input/20250318_170605PH1742288800.jpg
Auto_Ctrl/Input/20250318_170605PH1742288797.jpg
Auto_Ctrl/Input/20250318_170313PH1742288628.jpg
Auto_Ctrl/Input/20250318_170313PH1742288625.jpg
Auto_Ctrl/Input/20250318_164834PH1742287749.jpg
Auto_Ctrl/Input/20250318_164834PH1742287746.jpg
Auto_Ctrl/Input/20250318_164605PH1742287600.jpg
Auto_Ctrl/Input/20250318_164605PH1742287597.jpg
Auto_Ctrl/Input/20250318_164343PH1742287458.jpg
Auto_Ctrl/Input/20250318_164343PH1742287455.jpg
Auto_Ctrl/Input/20250318_163705PH1742287060.jpg
Auto_Ctrl/Input/20250318_163705PH1742287057.jpg
Auto_Ctrl/Input/20250318_163420PH1742286864.jpg
Auto_Ctrl/Input/20250318_163420PH1742286862.jpg
Auto_Ctrl/Input/20250318_162246PH1742286168.jpg
Auto_Ctrl/Input/20250318_162206PH1742286128.jpg
Auto_Ctrl/Input/20250318_161933PH1742286005.jpg
Auto_Ctrl/Input/20250318_161816PH1742285928.jpg
Auto_Ctrl/Input/1742285621.jpg
Auto_Ctrl/Input/1742285618.jpg
__pycache__
.idea

Auto_Ctrl/Find_COM.py Normal file

@@ -0,0 +1,65 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: Find_COM.py
Author: Zinc Zou
Email: zinczou@163.com
Date: 2024/10/11
Copyright: MoolsNet (慕乐网络科技(大连)有限公司)
www.mools.net
moolsnet@126.com
Description: Helpers for locating CH340 and generic USB serial ports.
"""
import serial
import serial.tools.list_ports


def list_ch340_ports():
    """Return the device names of all CH340 serial adapters."""
    ports = serial.tools.list_ports.comports()
    ch340_ports_list = []
    for port in ports:
        if 'CH340' in port.description or 'CH340' in port.device:
            ch340_ports_list.append(port.device)
            print("Found CH340 ports:", port.device)
    return ch340_ports_list


def list_USB_ports():
    """Return ports whose description contains '串行' ("serial" on Chinese-language Windows)."""
    ports = serial.tools.list_ports.comports()
    USB_ports_list = []
    for port in ports:
        if '串行' in port.description or '串行' in port.device:
            USB_ports_list.append(port.device)
            print("Found USB ports:", port.device)
    return USB_ports_list


if __name__ == "__main__":
    ports = list(serial.tools.list_ports.comports())
    if len(ports) == 0:
        print('No port available')
    else:
        for port in ports:
            print(port)
    port = list_ch340_ports()[0]  # serial port name; adjust for your setup
    baudrate = 9600  # baud rate; adjust for your setup
    pump_ser = serial.Serial(port, baudrate)
    ports_USB = list_USB_ports()
    if ports_USB:
        baudrate = 115200  # baud rate; adjust for your setup
        USB_ser = serial.Serial(ports_USB[0], baudrate)  # open the USB port, not the CH340 port


Auto_Ctrl/README.md Normal file

@@ -0,0 +1,23 @@
# Computer-Vision-Based AI Titration Control Device
## Mlabs AI Titration 1.0
## **[慕乐网络科技(大连)有限公司, MoolsNet](https://www.mools.net/)**
![Logo](组合logo.png?raw=true)
## This folder holds the automatic titration control code
**predictor_burette.py** is the burette version of the program
**predictor_Syringe_Pump.py** is the syringe-pump version of the program
**predictor_Syringe_Pump.py** is the peristaltic-pump version of the program
**resnet34-1Net.pth and similar** are the weight files the programs load, produced by the training code
**class_indices.json** records the class labels and must match the training program
**burette_clearair.py** is a simple motor-control script used to purge air bubbles from the burette
**burette_velocity.py** is a simple motor-control script used to tune the valve opening while the main program runs, so the titrant falls drop by drop
If you are unsure which serial port the programs should use, open the Windows Device Manager, find the COM number of the CH340 device under Ports (COM & LPT), or refer to the video tutorial.
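
As a quick wiring check, a minimal sketch (assuming this folder's `Find_COM.py` is on the import path; the 9600 baud rate matches the pump scripts here) might look like:

```python
# Minimal port-discovery sketch using the Find_COM helper from this folder.
import serial
import Find_COM

ch340_ports = Find_COM.list_ch340_ports()
if not ch340_ports:
    raise SystemExit("No CH340 adapter found; check Device Manager for the COM number.")

pump_ser = serial.Serial(ch340_ports[0], 9600)  # 9600 baud, as in the pump scripts
print("Pump controller connected on", ch340_ports[0])
pump_ser.close()
```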


@@ -0,0 +1,24 @@
import serial
import time

port = "COM5"      # serial port name; adjust for your setup
baudrate = 9600    # baud rate
ser = serial.Serial(port, baudrate)

data = b"q1h16d"   # 16 rpm mode: an angle slightly larger than in slow-drip mode
ser.write(data)
time.sleep(0.01)
data = b"q5h1d"    # run for 1 second
ser.write(data)
time.sleep(0.01)
data = b"q6h3d"    # counterclockwise (open the valve)
ser.write(data)
time.sleep(3.01)   # this wait determines how long the valve stays open
data = b"q6h2d"    # clockwise (close the valve)
ser.write(data)
ser.close()        # close the serial port


@@ -0,0 +1,35 @@
import serial
import time

port = "COM5"      # serial port name; adjust for your setup
baudrate = 9600    # baud rate; adjust for your setup
ser = serial.Serial(port, baudrate)

# This simulates the case where the valve opening is slightly too large.
data = b"q1h14d"
ser.write(data)
time.sleep(0.01)
data = b"q2h90d"
ser.write(data)
time.sleep(0.01)
# The two commands above set the counterclockwise speed. Because the opening is
# slightly too large, the speed here is set a bit below the original opening.
# Tune it repeatedly until the titrant falls drop by drop.
data = b"q5h1d"    # run for 1 second
ser.write(data)
time.sleep(0.01)
data = b"q6h3d"    # counterclockwise (open the valve)
ser.write(data)
time.sleep(5.01)
# This wait determines how long the valve stays open; it must not be shorter
# than 1 second, i.e. the time the valve needs to turn open.
data = b"q1h15d"
ser.write(data)
time.sleep(0.01)
data = b"q2h0d"
ser.write(data)
time.sleep(0.01)
# Return-speed parameters: keep them equal to the slow-drip speed in the main
# program so the forward and reverse rotation angles match.
data = b"q6h2d"    # clockwise (close the valve)
ser.write(data)
ser.close()
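
The `qXhYd` byte strings used throughout these scripts follow a pattern implied by the comments: `q1`/`q2` carry the integer and fractional speed digits, `q4`/`q5` the run time in minutes/seconds, and `q6` the direction (3 counterclockwise, 2 clockwise, 6 stop). A small illustrative wrapper (the `send_cmd` name is hypothetical, not part of the controller's documented API) shows the pattern:

# Hypothetical helper: format one controller command from the q{register}h{value}d pattern.
import time
import serial


def send_cmd(ser, register, value):
    """Write e.g. register=1, value=16 as b'q1h16d', pausing ~10 ms as the scripts do."""
    ser.write(f"q{register}h{value}d".encode("ascii"))
    time.sleep(0.01)


# Example: open the valve at 16 rpm for ~3 s, then close it (mirrors the clear-air script).
ser = serial.Serial("COM5", 9600)
send_cmd(ser, 1, 16)  # speed: 16 rpm
send_cmd(ser, 5, 1)   # run time: 1 s
send_cmd(ser, 6, 3)   # counterclockwise: open the valve
time.sleep(3.01)      # how long the valve stays open
send_cmd(ser, 6, 2)   # clockwise: close the valve
ser.close()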


@@ -0,0 +1,4 @@
{
"0": "orange",
"1": "yellow"
}

Auto_Ctrl/model.py Normal file

@@ -0,0 +1,198 @@
import torch.nn as nn
import torch


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, in_channel, out_channel, stride=1, downsample=None, **kwargs):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
                               kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channel)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,
                               kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)
        self.downsample = downsample

    def forward(self, x):
        identity = x
        if self.downsample is not None:
            identity = self.downsample(x)

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        out += identity
        out = self.relu(out)
        return out


class Bottleneck(nn.Module):
    """
    Note: in the original paper, the 1x1 conv on the main branch of the dashed
    (projection) residual block has stride 2 and the 3x3 conv has stride 1.
    The official PyTorch implementation instead gives the 1x1 conv stride 1 and
    the 3x3 conv stride 2; this raises top-1 accuracy by roughly 0.5%.
    See ResNet v1.5: https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch
    """
    expansion = 4

    def __init__(self, in_channel, out_channel, stride=1, downsample=None,
                 groups=1, width_per_group=64):
        super(Bottleneck, self).__init__()

        width = int(out_channel * (width_per_group / 64.)) * groups

        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=width,
                               kernel_size=1, stride=1, bias=False)  # squeeze channels
        self.bn1 = nn.BatchNorm2d(width)
        # -----------------------------------------
        self.conv2 = nn.Conv2d(in_channels=width, out_channels=width, groups=groups,
                               kernel_size=3, stride=stride, bias=False, padding=1)
        self.bn2 = nn.BatchNorm2d(width)
        # -----------------------------------------
        self.conv3 = nn.Conv2d(in_channels=width, out_channels=out_channel*self.expansion,
                               kernel_size=1, stride=1, bias=False)  # unsqueeze channels
        self.bn3 = nn.BatchNorm2d(out_channel*self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        identity = x
        if self.downsample is not None:
            identity = self.downsample(x)

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        out += identity
        out = self.relu(out)
        return out


class ResNet(nn.Module):

    def __init__(self,
                 block,
                 blocks_num,
                 num_classes=1000,
                 include_top=True,
                 groups=1,
                 width_per_group=64):
        super(ResNet, self).__init__()
        self.include_top = include_top
        self.in_channel = 64

        self.groups = groups
        self.width_per_group = width_per_group

        self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2,
                               padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(self.in_channel)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, blocks_num[0])
        self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2)
        self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2)
        self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2)
        if self.include_top:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # output size = (1, 1)
            self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

    def _make_layer(self, block, channel, block_num, stride=1):
        downsample = None
        if stride != 1 or self.in_channel != channel * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.in_channel, channel * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(channel * block.expansion))

        layers = []
        layers.append(block(self.in_channel,
                            channel,
                            downsample=downsample,
                            stride=stride,
                            groups=self.groups,
                            width_per_group=self.width_per_group))
        self.in_channel = channel * block.expansion

        for _ in range(1, block_num):
            layers.append(block(self.in_channel,
                                channel,
                                groups=self.groups,
                                width_per_group=self.width_per_group))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        if self.include_top:
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.fc(x)

        return x


def resnet34(num_classes=1000, include_top=True):
    # https://download.pytorch.org/models/resnet34-333f7ec4.pth
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)


def resnet50(num_classes=1000, include_top=True):
    # https://download.pytorch.org/models/resnet50-19c8e357.pth
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)


def resnet101(num_classes=1000, include_top=True):
    # https://download.pytorch.org/models/resnet101-5d3b4d8f.pth
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, include_top=include_top)


def resnext50_32x4d(num_classes=1000, include_top=True):
    # https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth
    groups = 32
    width_per_group = 4
    return ResNet(Bottleneck, [3, 4, 6, 3],
                  num_classes=num_classes,
                  include_top=include_top,
                  groups=groups,
                  width_per_group=width_per_group)


def resnext101_32x8d(num_classes=1000, include_top=True):
    # https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth
    groups = 32
    width_per_group = 8
    return ResNet(Bottleneck, [3, 4, 23, 3],
                  num_classes=num_classes,
                  include_top=include_top,
                  groups=groups,
                  width_per_group=width_per_group)

Auto_Ctrl/my_model.pkl Normal file

Binary file not shown.


@@ -0,0 +1,372 @@
import torch
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch import nn
import matplotlib.pyplot as plt
import cv2
import time
import os
from model import resnet34
import json
import serial
from datetime import datetime
from scipy.optimize import curve_fit
import numpy as np
import re
import Find_COM


def get_picture(frame, typ=0, date=''):  # capture a photo
    # grab one frame of data
    # ret, frame = cap.read()
    if frame is None:
        print(frame)
    # if ret:
    #     # non-blocking display by default
    #     cv2.imshow("picture", frame)
    # write the frame to an image file
    label = "1"
    timeStamp = 1381419600
    if typ:
        image_name = f'{date}{int(time.time())}.jpg'
    else:
        image_name = f'{date}PH{int(time.time())}.jpg'
    # where the photo is stored
    filepath = "Input/" + image_name  # keep consistent with the path above
    str_name = filepath.replace('%s', label)
    cv2.imwrite(str_name, frame)  # save the photo
    return image_name


def start_move_1(ser):  # draw in reagent
    # Note: the controller is switched to 20 ml syringe-pump mode here; because of
    # the lead screw, the speed set here is half of the actual speed in ml/min.
    data = b"q1h12d"  # feed 24 ml per minute
    ser.write(data)
    time.sleep(0.01)
    data = b"q4h0d"   # run 0 minutes
    ser.write(data)
    time.sleep(0.01)
    data = b"q5h30d"  # run 30 seconds
    ser.write(data)   # 12 ml drawn in total
    time.sleep(0.01)
    data = b"q6h3d"   # aspirate
    ser.write(data)
    time.sleep(30)    # wait for aspiration to finish
    print('Aspiration complete')
    # ser.close()


def start_move_2(ser):  # slow feed routine
    data = b"q1h1d"   # speed, integer digit
    ser.write(data)
    time.sleep(0.01)
    data = b"q2h50d"  # speed, fractional digit: combined setting 1.5, i.e. ~3 ml/min actual
    ser.write(data)
    time.sleep(0.01)
    data = b"q4h30d"  # run 30 minutes
    ser.write(data)
    time.sleep(0.01)
    data = b"q5h0d"   # run 0 seconds
    ser.write(data)
    time.sleep(0.01)
    data = b"q6h2d"   # dispense
    ser.write(data)
    time.sleep(1)
    # Note: the valve is not turned back here; the pump stays in a
    # drip-once-every-few-seconds state.
    # ser.close()


def start_move_4(ser):  # slow feed routine, 0.2 ml/s
    data = b"q1h6d"   # feed 12 ml per minute, 0.2 ml per second
    ser.write(data)
    time.sleep(0.01)
    data = b"q2h0d"   # speed, fractional digit: 0
    ser.write(data)
    time.sleep(0.01)
    data = b"q4h30d"  # run 30 minutes
    ser.write(data)
    time.sleep(0.01)
    data = b"q5h0d"   # run 0 seconds
    ser.write(data)   # continuous feed
    time.sleep(0.01)
    data = b"q6h2d"   # dispense
    ser.write(data)
    time.sleep(1)
    # Note: the valve is not turned back here; the pump stays in a
    # drip-once-every-few-seconds state.
    # ser.close()


def start_move_3(ser):  # stop feeding
    data = b"q6h6d"  # stop command
    ser.write(data)
    # turn the valve back
    ser.close()


def read_number_new(filepath):
    from paddleocr import PaddleOCR, draw_ocr
    # create an OCR instance configured for Chinese
    ocr = PaddleOCR(use_angle_cls=True, lang="ch")
    # run OCR on the image
    img_path = filepath
    result = ocr.ocr(img_path, cls=True)
    print('-----------------------------------------')
    print(result)
    ans = []
    for line in result:
        if line:
            for line1 in line:
                try:
                    ans.append(float(line1[-1][0]))
                except:
                    continue
    print(ans)
    if not ans:
        ans.append(10)
    return ans


# The hyperbolic-tangent fitting function. Its inflection point, where the
# second derivative is zero, lies at x = -b/d and is taken as the endpoint.
def poly_func(x, a, b, c, d):
    return a * np.tanh(d * x + b) + c


def line_chart(date="1", volume_list=[], voltage_list=[], color_list=[]):
    x = volume_list
    y = voltage_list
    z = color_list
    fig, ax1 = plt.subplots()
    plt.title("titration curve")
    # first Y axis: the potential curve
    color = 'tab:red'
    ax1.set_xlabel('value')
    ax1.set_ylabel('voltage', color=color)
    ax1.plot(x, y, color=color, antialiased=True)
    ax1.tick_params(axis='y', labelcolor=color)
    # second Y axis sharing the X axis: the color curve
    ax2 = ax1.twinx()
    color = 'tab:blue'
    ax2.set_ylabel('color', color=color)
    print(x, z)
    ax2.plot(x, z, color=color)
    ax2.tick_params(axis='y', labelcolor=color)
    ax2.set_yticks([0, 1])  # tick positions
    ax2.set_yticklabels(['yellow', 'orange'])  # tick labels
    ax2.spines['right'].set_position(('outward', 60))  # move the axis outward
    try:
        # initial parameter estimates
        popt, pcov = curve_fit(poly_func, x, y, p0=[max(y)*3/4, -max(x), max(y), 1.5])
        # print the fitted parameters
        print("Optimal parameters:", popt)
        print(f'Potentiometric jump point: {-popt[1]/popt[3]:.3f}')
        x_d = np.arange(0, max(x), 0.05)
        # evaluate the fitted curve
        y_fit = poly_func(x_d, *popt)
        # first derivative (potential vs. volume)
        dE_dV = np.gradient(y_fit)
        # second derivative
        d2E_dV2 = np.gradient(dE_dV)
        y2 = d2E_dV2.tolist()
        # third Y axis sharing the X axis: the second derivative
        ax3 = ax1.twinx()
        color = 'tab:green'
        ax3.set_ylabel('2nd Derivative', color=color)
        ax3.plot(x_d, y2, color=color)
        ax3.tick_params(axis='y', labelcolor=color)
        ax3.grid(True, linestyle='--', linewidth=0.5, color='gray', axis='both')
        # mark the potentiometric jump point
        x_d, y_d = -popt[1]/popt[3], 0.0
        ax3.plot(x_d, y_d, 'ro')  # red circle
        # annotate its coordinate, offset slightly from the point
        ax3.annotate(f'({x_d:.2f})',
                     xy=(x_d, y_d),
                     color='red',
                     xytext=(x_d - 1, y_d + max(y2)/10))
        # mark the visual (color) jump point at the first color change
        x_c, y_c = x[z.index(1)] - 0.025, 0.0
        ax3.plot(x_c, y_c, 'bo')  # blue circle
        ax3.annotate(f'({x_c:.2f})',
                     xy=(x_c, y_c),
                     color='blue',
                     xytext=(x_c - 1, y_c - max(y2) / 10))
        print(f"Visual jump point: {x_c:.3f}")
    except Exception as e:
        print(e)
        pass
    fig.tight_layout()  # fill the figure area
    plt.savefig(f'Output/{date}.png')
    plt.show()
    plt.pause(1)
    plt.close()


def predictor(im_file, device):
    # open the image with PIL
    image = Image.open(im_file)
    # preprocessing pipeline
    data_transform = transforms.Compose(
        [
            transforms.Resize(256),          # resize to 256x256
            transforms.CenterCrop(224),      # center-crop to 224x224
            transforms.ToTensor(),           # convert to a PyTorch tensor
            # normalize with the ImageNet mean and std
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    # [N, C, H, W]
    img = data_transform(image)
    # expand batch dimension
    img = torch.unsqueeze(img, dim=0)
    # path of the class-index file
    json_path = './class_indices.json'
    with open(json_path, "r") as f:
        class_indict = json.load(f)
    # create model
    model = resnet34(num_classes=2).to(device)  # adjust to the number of classes
    # load model weights
    weights_path = "./resnet34-1Net.pth"  # adjust to the model actually used
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(weights_path)
    model.load_state_dict(torch.load(weights_path, map_location=device))
    # prediction
    model.eval()
    with torch.no_grad():
        # predict class
        output = torch.squeeze(model(img.to(device))).cpu()
        # softmax to get per-class probabilities
        predict = torch.softmax(output, dim=0)
        # index of the most probable class
        predict_cla = torch.argmax(predict).numpy()
    # class name looked up from the class dictionary
    class_a = "{}".format(class_indict[str(predict_cla)])
    # probability formatted to three significant digits
    prob_a = "{:.3}".format(predict[predict_cla].numpy())
    prob_b = float(prob_a)
    # print the predicted class and probability
    print(class_a)
    print(prob_b)
    return class_a, prob_b


def voltage(ser):
    ser.write("VOL|\n".encode())
    time.sleep(0.1)  # wait for the device to respond
    while True:
        # read the response
        response = ser.readline().decode().strip()
        if response:
            try:
                return float(response)
            except:
                pass


def main():
    # port = "COM11"  # serial port name; adjust for your setup
    port = Find_COM.list_ch340_ports()[0]  # serial port name; adjust for your setup
    baudrate = 9600  # baud rate; adjust for your setup
    pump_ser = serial.Serial(port, baudrate)
    port_USB = Find_COM.list_USB_ports()  # serial port names; adjust for your setup
    if port_USB:
        USB_ser = serial.Serial(port_USB[0], baudrate=115200, timeout=1)
        # print(voltage(USB_ser))
    videoSourceIndex = 0  # camera index; adjust for your setup
    cap = cv2.VideoCapture(videoSourceIndex, cv2.CAP_DSHOW)  # open the camera
    # use the GPU if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # a variable recording the initial state is needed before the loop, e.g. color_type
    total_volume = 0
    now_volume = 0
    volume_list = []
    voltage_list = []
    color_list = []
    start_time = time.time()
    # convert the timestamp to a datetime object
    dt_object = datetime.fromtimestamp(start_time)
    # format it as a string used to name saved images
    formatted_time = dt_object.strftime('%Y%m%d_%H%M%S')
    print("Experiment started at", formatted_time)
    n = 10
    total_n = n
    start_move_2(pump_ser)
    while True:
        total_volume += 1
        volume_list.append(total_volume)
        # capture an image
        ret, frame = cap.read()
        name = get_picture(frame, 0, formatted_time)
        # full path of the image
        im_file = 'Input/' + name
        cv2.imshow('Color', frame)
        cv2.waitKey(1)
        class_a, prob_b = predictor(im_file, device)
        if port_USB:
            voltage_list.append(voltage(USB_ser))
        if class_a == "orange" and prob_b > 0.5:  # endpoint check
            # judged as the endpoint
            # (two lists could record the next five judgments for confirmation)
            start_move_3(pump_ser)
            print('----->>Visual Endpoint<<-----')
            print(volume_list[-1])
            print(im_file)
            color_list.append(1)
            break
        color_list.append(0)
        print(total_volume)
    print(volume_list)
    print(voltage_list)
    print(color_list)
    with open(f'Output/{formatted_time}.json', 'w') as f:
        # save the lists to a JSON file
        json.dump({"volume_list": volume_list, 'voltage_list': voltage_list, 'color_list': color_list}, f)
    # close the serial ports
    pump_ser.close()
    if port_USB:
        USB_ser.close()
    line_chart(formatted_time, volume_list=volume_list, voltage_list=voltage_list, color_list=color_list)


if __name__ == "__main__":
    import warnings
    # suppress all warnings
    warnings.filterwarnings('ignore')
    main()


@@ -0,0 +1,280 @@
import torch
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch import nn
import matplotlib.pyplot as plt
import cv2
import time
import os
from model import resnet34
import serial
from datetime import datetime
from scipy.optimize import curve_fit
import numpy as np
import re
import json
import Find_COM
import builtins


class MAT:
    def __init__(self, videoSourceIndex=0, weights_path="resnet34-1Net.pth", json_path='class_indices.json', classes=2):
        print('Initializing the experiment')
        self.data_root = os.getcwd()
        self.videoSourceIndex = videoSourceIndex  # camera index
        self.cap = cv2.VideoCapture(videoSourceIndex, cv2.CAP_DSHOW)  # open the camera
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.port = Find_COM.list_ch340_ports()[0]  # serial port name
        self.pump_ser = serial.Serial(self.port, 9600)  # open the pump serial port
        self.usb_port = Find_COM.list_USB_ports()  # serial port names
        if self.usb_port:
            self.usb_ser = serial.Serial(self.usb_port[0], 115200)  # open the first matching USB port
        self.classes = classes
        self.total_volume = 0  # total volume added
        self.now_volume = 0  # volume currently in the syringe
        self.volume_list = []  # volume history
        self.voltage_list = []  # potential history (if measured)
        self.color_list = []  # color history
        self.start_time = time.time()  # experiment start time
        self.weights_path = os.path.join(self.data_root, weights_path)  # weight file path
        self.json_path = os.path.join(self.data_root, json_path)  # class file path
        # format the start time as YYYYMMDD_HHMMSS; all later files are named with it
        self.formatted_time = datetime.fromtimestamp(self.start_time).strftime('%Y%m%d_%H%M%S')
        print("Experiment started at", self.formatted_time)

    def get_picture(self, frame, typ=0, date=''):  # take and save a photo
        if frame is None:
            print(frame)
        # photos go to the Input folder, named start time + current volume
        image_name = f'{date}_{self.total_volume}.jpg'
        filepath = os.path.join(self.data_root, "Input", image_name)
        str_name = filepath.replace('%s', '1')
        cv2.imwrite(str_name, frame)
        return image_name

    def start_move_1(self):  # draw-in routine
        data = b"q1h24d"  # *2
        self.pump_ser.write(data)
        time.sleep(0.01)
        data = b"q2h0d"
        self.pump_ser.write(data)
        time.sleep(0.01)
        data = b"q4h0d"
        self.pump_ser.write(data)
        time.sleep(0.01)
        data = b"q5h15d"
        self.pump_ser.write(data)
        time.sleep(0.01)
        data = b"q6h3d"
        self.pump_ser.write(data)
        time.sleep(15)
        print('Aspiration complete')

    def start_move_2(self, speed=0.1):  # feed routine
        # convert the per-drop volume to the controller's speed setting
        speed_min = speed * 30
        speed_min_int = int(speed_min)
        speed_min_float = int((speed_min - speed_min_int) * 100)
        data = f"q1h{speed_min_int}d"
        self.pump_ser.write(data.encode('ascii'))
        time.sleep(0.01)
        data = f"q2h{speed_min_float}d"
        self.pump_ser.write(data.encode('ascii'))
        time.sleep(0.01)
        data = b"q4h0d"
        self.pump_ser.write(data)
        time.sleep(0.01)
        data = b"q5h1d"
        self.pump_ser.write(data)
        time.sleep(0.01)
        # dispense
        data = b"q6h2d"
        self.pump_ser.write(data)
        time.sleep(1)

    def start_move_3(self):  # feed emergency stop
        data = b"q6h6d"
        self.pump_ser.write(data)

    def voltage(self):  # measure the potential
        self.usb_ser.write("VOL|\n".encode())
        time.sleep(0.1)
        while True:
            response = self.usb_ser.readline().decode().strip()
            if response:
                try:
                    return float(response)
                except:
                    return 0

    @staticmethod
    def poly_func(x, a, b, c, d):
        return a * np.tanh(d * x + b) + c

    def line_chart(self):
        x = self.volume_list
        y = self.voltage_list
        z = self.color_list
        fig, ax1 = plt.subplots()
        plt.title("titration curve")
        color = 'tab:red'
        ax1.set_xlabel('value')
        ax1.set_ylabel('voltage', color=color)
        ax1.plot(x, y, color=color, antialiased=True)
        ax1.tick_params(axis='y', labelcolor=color)
        ax2 = ax1.twinx()
        color = 'tab:blue'
        ax2.set_ylabel('color', color=color)
        ax2.plot(x, z, color=color)
        ax2.tick_params(axis='y', labelcolor=color)
        ax2.set_yticks([0, 1])
        ax2.set_yticklabels(['yellow', 'orange'])
        ax2.spines['right'].set_position(('outward', 60))
        try:
            popt, pcov = curve_fit(self.poly_func, x, y, p0=[max(y) * 3 / 4, -max(x), max(y), 1.5])
            print("Optimal parameters:", popt)
            print(f'Potentiometric jump point: {-popt[1] / popt[3]:.3f}')
            x_d = np.arange(0, max(x), 0.05)
            y_fit = self.poly_func(x_d, *popt)
            dE_dV = np.gradient(y_fit)
            d2E_dV2 = np.gradient(dE_dV)
            y2 = d2E_dV2.tolist()
            ax3 = ax1.twinx()
            color = 'tab:green'
            ax3.set_ylabel('2nd Derivative', color=color)
            ax3.plot(x_d, y2, color=color)
            ax3.tick_params(axis='y', labelcolor=color)
            ax3.grid(True, linestyle='--', linewidth=0.5, color='gray', axis='both')
            x_d, y_d = -popt[1] / popt[3], 0.0
            ax3.plot(x_d, y_d, 'ro')
            ax3.annotate(f'({x_d:.2f})', xy=(x_d, y_d), color='red', xytext=(x_d - 1, y_d + max(y2) / 10))
        except Exception as e:
            print(e)
            pass
        fig.tight_layout()
        plt.savefig(f'Output/{self.formatted_time}.png')
        plt.show()
        plt.pause(1)
        plt.close()

    def predictor(self, im_file):  # classify one image
        image = Image.open(im_file)
        data_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        img = data_transform(image)
        img = torch.unsqueeze(img, dim=0)
        with open(self.json_path, "r") as f:
            class_indict = json.load(f)
        model = resnet34(num_classes=self.classes).to(self.device)
        assert os.path.exists(self.weights_path), "file: '{}' does not exist.".format(self.weights_path)
        model.load_state_dict(torch.load(self.weights_path, map_location=self.device))
        model.eval()
        with torch.no_grad():
            output = torch.squeeze(model(img.to(self.device))).cpu()
            predict = torch.softmax(output, dim=0)
            predict_cla = torch.argmax(predict).numpy()
        class_a = "{}".format(class_indict[str(predict_cla)])
        prob_a = "{:.3}".format(predict[predict_cla].numpy())
        prob_b = float(prob_a)
        print('class_:', class_a)
        print('prob_:', prob_b)
        return class_a, prob_b

    def __del__(self):
        # draw the titration curve
        # self.line_chart()
        # close the serial port and the camera
        self.pump_ser.close()
        self.cap.release()
        cv2.destroyAllWindows()
        print("Experiment finished.")

    def run(self, quick_speed=0.2, slow_speed=0.05, switching_point=5, end_kind='orange', end_prob=0.5):
        n = 1
        total_n = n
        while True:
            if self.now_volume <= 0:
                self.start_move_1()  # draw in 12 ml
                self.now_volume += 12
            if self.total_volume < switching_point:  # add quick_speed ml per step
                speed = quick_speed
                self.start_move_2(speed)
                self.total_volume += speed
                self.now_volume -= speed
            else:  # add slow_speed ml per step
                speed = slow_speed
                self.start_move_2(speed)
                self.total_volume += speed
                self.now_volume -= speed
            self.total_volume = round(self.total_volume, 3)
            # capture an image
            ret, frame = self.cap.read()
            if not ret:
                print("Failed to capture frame from camera.")
                break
            name = self.get_picture(frame, 0, self.formatted_time)
            im_file = 'Input/' + name
            cv2.imshow('Color', frame)
            cv2.waitKey(1)
            class_a, prob_b = self.predictor(im_file)
            self.volume_list.append(self.total_volume)
            # if a voltmeter is attached, the potential can be read here
            # self.voltage_list.append(self.voltage())
            if class_a == end_kind and prob_b > end_prob:  # endpoint check
                print('----->>Visual Endpoint<<-----')
                print(f"Total Volume: {self.total_volume} ml")
                print(f"Image File: {im_file}")
                self.color_list.append(1)
                break
            else:
                self.color_list.append(0)
            print(f"Current Total Volume: {self.total_volume} ml")
        print("Volume List:", self.volume_list)
        print("Voltage List:", self.voltage_list)
        print("Color List:", self.color_list)
        # save the experiment data to a JSON file
        with builtins.open(f'Output/{self.formatted_time}.json', 'w') as f:
            json.dump(
                {"volume_list": self.volume_list, 'voltage_list': self.voltage_list, 'color_list': self.color_list},
                f)


if __name__ == "__main__":
    import warnings
    # suppress all warnings
    warnings.filterwarnings('ignore')
    # create a MAT instance and run it
    mat = MAT(videoSourceIndex=0, weights_path="resnet34-1Net.pth", json_path='class_indices.json', classes=2)
    mat.run(quick_speed=0.2, slow_speed=0.05, switching_point=5, end_kind='orange', end_prob=0.5)


@@ -0,0 +1,170 @@
import torch
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch import nn
import matplotlib.pyplot as plt
import cv2
import time
import os
from model import resnet34
import json
import serial
import Find_COM


def get_picture(cap):  # capture a photo
    # grab one frame of data
    ret, frame = cap.read()
    if frame is None:
        print(frame)
    if ret:
        # non-blocking display by default
        cv2.imshow("picture", frame)
        cv2.waitKey(1)
    # write the frame to an image file
    label = "1"
    timeStamp = 1381419600
    image_name = str(int(time.time())) + ".jpg"
    # where the photo is stored
    filepath = "Input/" + image_name  # keep consistent with the path above
    str_name = filepath.replace('%s', label)
    cv2.imwrite(str_name, frame)  # save the photo
    return image_name


def start_move_1(port, baudrate):  # fast acid-adding routine
    ser = serial.Serial(port, baudrate)
    data = b"q1h15d"  # 15 rpm: a rotation speed slightly above the slow drip
    ser.write(data)
    time.sleep(0.01)
    data = b"q5h1d"   # run 1 second
    ser.write(data)
    time.sleep(0.01)
    data = b"q6h3d"   # counterclockwise
    ser.write(data)
    time.sleep(20)    # 20 s of fast dripping; must not be shorter than 1 s, the valve-opening time
    data = b"q6h2d"   # clockwise
    ser.write(data)
    time.sleep(1)     # time to turn back
    ser.close()


def start_move_2(port, baudrate):  # slow acid-adding routine
    ser = serial.Serial(port, baudrate)
    data = b"q1h14d"  # 14 rpm, i.e. 14/60 = 0.233 rev/s; adjust as needed
    ser.write(data)
    time.sleep(0.01)
    # Note: the fractional part of the speed can also be set, e.g.:
    # data = b"q2h50d"  # combined with q1: 14.5 rpm; adjust as needed
    # ser.write(data)
    # time.sleep(0.01)
    data = b"q5h1d"   # run 1 second
    ser.write(data)
    time.sleep(0.01)
    data = b"q6h3d"   # counterclockwise
    ser.write(data)
    time.sleep(1)     # time to turn the valve
    # Note: the valve is not turned back; it stays in a drip-once-every-few-seconds state
    ser.close()


def start_move_3(port, baudrate):  # stop adding acid
    ser = serial.Serial(port, baudrate)
    data = b"q1h14d"  # 14 rpm; must match the speed in start_move_2
    ser.write(data)
    time.sleep(0.01)
    data = b"q5h1d"   # run 1 second
    ser.write(data)
    time.sleep(0.01)
    data = b"q6h2d"   # clockwise
    ser.write(data)
    time.sleep(1)     # time to turn the valve
    # turn the valve back
    ser.close()


def main():
    # port = "COM6"  # serial port name; adjust for your setup
    port = Find_COM.list_ch340_ports()[0]  # serial port name; adjust for your setup
    baudrate = 9600  # baud rate; adjust for your setup
    # # Fast-drip phase; tune this to the amount you need
    # start_move_1(port, baudrate)
    # time.sleep(15)
    videoSourceIndex = 0  # camera index; adjust for your setup
    cap = cv2.VideoCapture(videoSourceIndex, cv2.CAP_DSHOW)  # open the camera
    # use the GPU if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    start_move_2(port, baudrate)  # enter the slow-drip state
    while True:
        # capture an image
        name = get_picture(cap)
        # full path of the image
        im_file = 'Input/' + name
        # open the image with PIL
        image = Image.open(im_file)
        # print(type(image))  # print the image type
        # preprocessing pipeline
        data_transform = transforms.Compose(
            [
                transforms.Resize(256),      # resize to 256x256
                transforms.CenterCrop(224),  # center-crop to 224x224
                transforms.ToTensor(),       # convert to a PyTorch tensor
                # normalize with the ImageNet mean and std
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
        # [N, C, H, W]
        img = data_transform(image)
        # expand batch dimension
        img = torch.unsqueeze(img, dim=0)
        # path of the class-index file
        json_path = './class_indices.json'
        with open(json_path, "r") as f:
            class_indict = json.load(f)
        # create model
        model = resnet34(num_classes=2).to(device)
        # load model weights
        weights_path = "./resnet34-1Net.pth"
        assert os.path.exists(weights_path), "file: '{}' does not exist.".format(weights_path)
        model.load_state_dict(torch.load(weights_path, map_location=device, weights_only=True))
        # prediction
        model.eval()
        with torch.no_grad():
            # predict class
            output = torch.squeeze(model(img.to(device))).cpu()
            # softmax to get per-class probabilities
            predict = torch.softmax(output, dim=0)
            # index of the most probable class
            predict_cla = torch.argmax(predict).numpy()
        # class name looked up from the class dictionary
        class_a = "{}".format(class_indict[str(predict_cla)])
        # probability formatted to three significant digits
        prob_a = "{:.3}".format(predict[predict_cla].numpy())
        prob_b = float(prob_a)
        # print the predicted class and probability
        print(class_a)
        print(prob_b)
        if class_a == "orange" and prob_b >= 0.5:  # titration endpoint reached
            # close the valve
            start_move_3(port, baudrate)
            print('----->>End<<-----')
            print(im_file)
            time.sleep(1)
            # release the camera
            cap.release()
            # close all OpenCV windows
            cv2.destroyAllWindows()
            break
        time.sleep(1)  # interval between photos


if __name__ == "__main__":
    main()


@@ -0,0 +1 @@
0ad4c44919ed3fa3ac4542f40e71fcb84b1d6bb8


@@ -0,0 +1 @@
e472b19b8c49e47bc1512dc9fb1539304cb68deb

Auto_Ctrl/组合logo.png Normal file

Binary file not shown.

LICENSE Normal file

@@ -0,0 +1,176 @@
Automatic_Titration_Control
Version 1.0, March 2024
http://www.mools.net
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS


@@ -0,0 +1,5 @@
MAT_1.2.2.exe executable: available from the network-drive link below
https://pan.baidu.com/s/1H4sKhRJqt17unYj8XAs8NQ
Extraction code: 9km2



@@ -0,0 +1 @@
307ac081f349fd7d9203ab27ec6b3dcb5546aca0

Picture_Train/README.md Normal file

@@ -0,0 +1,10 @@
## This folder holds the PyTorch implementation of the code
**model.py** is the model file
**train.py** trains the model
**predict.py** runs predictions with the trained model
**class_indices.json** is the label file for the training dataset
**data** is split into train and val parts, used for training and validation; just place your classified photos into the "orange" or "yellow" subfolders
Note: the folder currently contains only a few sample images, so training on them directly will not give ideal results. After all, it would not do for me to finish all the work, or how would you demonstrate yours? Right?
After training, remember to copy the generated weight file into the control program for use (a minimal data-loading sketch follows).
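
For reference, the train/val layout described above is the standard `torchvision.datasets.ImageFolder` convention; a minimal sketch of loading it (paths and the 224-pixel preprocessing assumed from this repository's predictor code) might be:

```python
# Minimal sketch: load data/train and data/val as described above.
# The subfolder names ("orange", "yellow") become the class labels.
from torchvision import datasets, transforms

transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

train_set = datasets.ImageFolder("data/train", transform=transform)  # data/train/orange, data/train/yellow
val_set = datasets.ImageFolder("data/val", transform=transform)
print(train_set.class_to_idx)  # expect {'orange': 0, 'yellow': 1}, matching class_indices.json
```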


@@ -0,0 +1,4 @@
{
"0": "orange",
"1": "yellow"
}

Binary image files not shown.

Some files were not shown because too many files have changed in this diff.