@@ -8,11 +8,9 @@ import org.wlld.imageRecognition.Operation;
import org.wlld.imageRecognition.Picture;
import org.wlld.imageRecognition.TempleConfig;
import org.wlld.imageRecognition.border.Frame;
import org.wlld.imageRecognition.border.FrameBody;
import org.wlld.nerveEntity.ModelParameter;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
@@ -22,16 +20,49 @@ import java.util.Map;
 */
public class HelloWorld {
    public static void main(String[] args) throws Exception {
        testPic();
        test();
        //testPic();
        //testModel();
    }

    public static void test() throws Exception {
        Picture picture = new Picture();
        TempleConfig templeConfig = new TempleConfig();
        ModelParameter modelParameter = JSONObject.parseObject(ModelData.DATA, ModelParameter.class);
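        //ModelData.DATA is assumed to hold a previously exported model as a JSON string,
        //produced the same way as the getModel()/JSON.toJSONString() calls further down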
        templeConfig.init(StudyPattern.Accuracy_Pattern, true, 3204, 4032, 1);
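        //the init arguments are presumably: the study pattern, an initialization flag,
        //the image width and height in pixels, and the number of categories to classify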
        templeConfig.insertModel(modelParameter);
        Operation operation = new Operation(templeConfig);
        for (int i = 1; i < 120; i++) {//neural-network learning
            System.out.println("study==" + i);
            //read the images from local paths and convert them into matrices
            Matrix right = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/c/c" + i + ".png");
            Matrix wrong = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/b/b" + i + ".png");
            //feed the image matrices and their labels for learning; in Accuracy_Pattern this is the second learning pass
            //during the second pass the third parameter must be true
            operation.learning(right, 1, true);
            operation.learning(wrong, 0, true);
        }
        templeConfig.clustering();//run clustering
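        //clustering presumably groups the features produced by the learning pass above;
        //the loop below then checks images 121-139, which were never used for training:
        //toSee should return category 1 for the "c" images and 0 for the "b" images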
        for (int j = 121; j < 140; j++) {
            Matrix right = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/c/c" + j + ".png");
            Matrix wrong = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/b/b" + j + ".png");
            int rightId = operation.toSee(right);
            int wrongId = operation.toSee(wrong);
            System.out.println("right==" + rightId);
            System.out.println("wrong==" + wrongId);
        }
        ModelParameter modelParameter1 = templeConfig.getModel();
        String a = JSON.toJSONString(modelParameter1);
        System.out.println(a);
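        //the printed JSON can be stored (for example in a database) and injected again later,
        //exactly as ModelData.DATA was injected at the top of this method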
    }

    public static void testPic() throws Exception {
        //test the SPEED-mode learning process
        //initialize the image-to-matrix class: it converts an image file into a matrix object
        Picture picture = new Picture();
        //initialize the configuration template and set the mode to Speed_Pattern, i.e. speed mode
        TempleConfig templeConfig = getTemple(true, StudyPattern.Speed_Pattern);
        TempleConfig templeConfig = getTemple(true, StudyPattern.Speed_Pattern, false);
        //initialize the operation class, loading the config template and the output callback class into it
        //the operation class has two constructors: one takes a callback class, one does not
        //if the positioning feature is used, no callback class is needed; if it is not enabled, a callback class must be provided
@@ -46,7 +77,7 @@
        rightTagging.put(1, 1.0);
        wrongTagging.put(1, 0.0);
        // for example, the labels above define only one category: the first map is the true label, the second map is the false label
        for (int i = 1; i < 999; i++) {
        for (int i = 1; i < 1000; i++) {
            System.out.println("start learning 1==" + i);
            //read the image from a local path (for images stored on this machine) and convert it into a matrix
            //note: learning needs at least a thousand-plus different pictures of the same object; the more it learns the more accurate it gets, and looping over the same picture repeatedly is useless
@@ -59,6 +90,15 @@
            //the wrong matrix is a negative picture, so it is trained with the 0.0 label above to tell the computer this picture is wrong
            operation.study(wrong, wrongTagging);
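            //in Speed_Pattern the training call is study(matrix, labelMap); the Accuracy_Pattern
            //examples further down use learning(...) with an extra boolean instead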
        }

        Matrix right = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/test/a101.png");
        Matrix wrong = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/b/b1000.png");
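        //ma is assumed to be the output callback registered on this Operation; setNub presumably
        //hands it the value to expect when the results of the look calls below (event ids 3 and 2) arrive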
        ma.setNub(1);
        operation.look(wrong, 3);
        ma.setNub(0);
        operation.look(right, 2);
        //if object-position learning is enabled, the boxStudy method must be called when learning finishes
        //if it is not enabled, do not call it, or an error will be thrown
        //templeConfig.boxStudy();
@@ -68,32 +108,43 @@
        ModelParameter modelParameter = templeConfig.getModel();
        //convert the model to a JSON string and save it to the database, to be loaded for recognition the next time the service starts
        String model = JSON.toJSONString(modelParameter);
        System.out.println(model);
        //that is the whole SPEED-mode learning process; for recognition you simply initialize again and inject the learning result before use

        //recognition process
        //convert the JSON string fetched from the database back into a model
        ModelParameter modelParameter1 = JSONObject.parseObject(model, ModelParameter.class);
        //initialize the model configuration
        TempleConfig templeConfig1 = getTemple(false, StudyPattern.Speed_Pattern);
        //inject the previously learned model into the config template; once the learning result is injected, recognition can be used
        templeConfig1.insertModel(modelParameter1);
        //attach the config template to the operation class
        Operation operation1 = new Operation(templeConfig1);
        //read the local image bytes and convert them into a dimension-reduced grayscale matrix
        Matrix right = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/test/a101.png");
        Matrix wrong = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/b/b1000.png");
        //run image recognition; parameter note: eventId is the event id; because results are delivered through the callback class, a key is needed to tell events apart
        //it tells the callback which call it is responding to, so use a different id for every recognition call
        operation1.look(wrong, 3);
        operation1.look(right, 2);
        // ModelParameter modelParameter1 = JSONObject.parseObject(model, ModelParameter.class);
        // //initialize the model configuration
        // TempleConfig templeConfig1 = getTemple(false, StudyPattern.Speed_Pattern, false);
        // //inject the previously learned model into the config template; once the learning result is injected, recognition can be used
        // templeConfig1.insertModel(modelParameter1);
        // //attach the config template to the operation class
        // Operation operation1 = new Operation(templeConfig1);
        // //read the local image bytes and convert them into a dimension-reduced grayscale matrix
        // Matrix right = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/test/a101.png");
        // Matrix wrong = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/b/b1000.png");
        // //run image recognition; parameter note: eventId is the event id; because results are delivered through the callback class, a key is needed to tell events apart
        // //it tells the callback which call it is responding to, so use a different id for every recognition call
        // operation1.look(wrong, 3);
        // operation1.look(right, 2);
        //if the positioning feature is enabled, use lookWithPosition for detection; if it is not enabled, calling it throws an error
        //returns a map whose key is the category id and whose value is how many objects of that category the picture contains, together with each object's position coordinates and size
        //Map<Integer, List<FrameBody>> map = operation1.lookWithPosition(right, 4);
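        //FrameBody (imported above) presumably carries one detected object's position coordinates and size, as described in the comment above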
    }

    public static TempleConfig getTemple(boolean isFirst, int pattern) throws Exception {
    public static TempleConfig getTemple(boolean isFirst, int pattern
            , boolean isPosition) throws Exception {
        //create a configuration template class; its job is mainly to store and load configuration parameters
        TempleConfig templeConfig = new TempleConfig();
        if (isPosition) {
            templeConfig.setHavePosition(true);
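            //the Frame below presumably describes the area scanned for objects: width/height are the
            //image dimensions in pixels, and lengthWidth/lengthHeight the size of each positioning sub-window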
            Frame frame = new Frame();
            frame.setWidth(3024);
            frame.setHeight(4032);
            frame.setLengthHeight(100);
            frame.setLengthWidth(100);
            templeConfig.setFrame(frame);
        }
        //depth of the fully connected layers; optional, defaults to 2 if not set
        //much like the depth of awareness in the human brain: the deeper the learning, the more accurate the result, but the amount of training grows geometrically
        //for example, the default depth of 2 needs 1000+ photos each for the positive and negative templates, giving roughly 70% accuracy (the numbers are just an example, not exact values)
@@ -101,7 +152,7 @@
        //and so on: as long as memory allows, depth is unbounded and the recognition rate approaches 100%
        //but there is a limit: beyond a certain depth, adding more depth actually lowers the recognition rate, so the right depth has to be found by repeated experiment
        //note: if the depth goes up but the amount of training does not grow with it, accuracy will actually be lower!
        templeConfig.setDeep(2);
        //templeConfig.setDeep(2);
        //enable position learning; when locating an object within a picture, note that
        //the training pictures must be white or blank everywhere except the object being learned (i.e. cut out with Photoshop),
        //that is, apart from the object the picture must contain no other interfering color at all (not even a single stray pixel)
@@ -134,11 +185,11 @@

    public static void testModel() throws Exception {
        // example of exporting and injecting model parameters
        TempleConfig templeConfig = getTemple(true, StudyPattern.Accuracy_Pattern);
        TempleConfig templeConfig = getTemple(true, StudyPattern.Accuracy_Pattern, false);
        ModelParameter modelParameter1 = templeConfig.getModel();
        String model = JSON.toJSONString(modelParameter1);
        System.out.println(model);
        TempleConfig templeConfig2 = getTemple(false, StudyPattern.Accuracy_Pattern);
        TempleConfig templeConfig2 = getTemple(false, StudyPattern.Accuracy_Pattern, false);
        ModelParameter modelParameter3 = JSONObject.parseObject(model, ModelParameter.class);
        templeConfig2.insertModel(modelParameter3);
        ModelParameter modelParameter2 = templeConfig2.getModel();
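        //modelParameter2 should now describe the same parameters as the exported JSON string,
        //which demonstrates the export / import round trip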
@@ -150,14 +201,15 @@
    public static void testPic2() throws Exception {
        //test the Accuracy_Pattern learning process; parts identical to SPEED mode are not explained again
        Picture picture = new Picture();
        TempleConfig templeConfig = getTemple(true, StudyPattern.Accuracy_Pattern);
        Operation operation = new Operation(templeConfig);
        TempleConfig templeConfig = getTemple(true, StudyPattern.Accuracy_Pattern, false);
        Ma ma = new Ma();
        Operation operation = new Operation(templeConfig, ma);
        //the label key is the category number; the value is the label: 1 is TRUE, 0 is FALSE
        Map<Integer, Double> rightTagging = new HashMap<>();//category labels
        Map<Integer, Double> wrongTagging = new HashMap<>();//category labels
        rightTagging.put(1, 1.0);
        wrongTagging.put(1, 0.0);
        for (int i = 1; i < 2; i++) {
        for (int i = 1; i < 100; i++) {
            System.out.println("start learning 1==" + i);
            //read the image from a local path and convert it into a matrix
            Matrix right = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/c/c" + i + ".png");
@@ -166,24 +218,35 @@
            //here the learning method is used; the first two parameters are the same as in SPEED mode, with an extra third parameter
            //during the first learning pass this parameter must be false
            //the last parameter is the id
            operation.learning(right, rightTagging, false);
            operation.learning(wrong, wrongTagging, false);
            operation.learning(right, 1, false);
            operation.learning(wrong, 0, false);
        }
        for (int i = 1; i < 2; i++) {//neural-network learning
        for (int i = 1; i < 300; i++) {//neural-network learning
            System.out.println("start learning 2==" + i);
            //read the image from a local path and convert it into a matrix
            Matrix right = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/c/c" + i + ".png");
            Matrix wrong = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/b/b" + i + ".png");
            //feed the image matrices and their labels for learning; in Accuracy_Pattern this is the second learning pass
            //during the second pass the third parameter must be true
            operation.learning(right, rightTagging, true);
            operation.learning(wrong, wrongTagging, true);
            operation.learning(right, 1, true);
            operation.learning(wrong, 0, true);
        }
        ModelParameter modelParameter = templeConfig.getModel();
        String st = JSON.toJSONString(modelParameter);
        System.out.println(st);
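        //the loop below verifies images 300-319 through the callback; note that every look call
        //gets its own event id (100 + i and 200 + i), as the comments above require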
        for (int i = 300; i < 320; i++) {//neural-network learning
            //read the image from a local path and convert it into a matrix
            Matrix right = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/c/c" + i + ".png");
            Matrix wrong = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/b/b" + i + ".png");
            //feed the image matrices and their labels for learning; in Accuracy_Pattern this is the second learning pass
            //during the second pass the third parameter must be true
            ma.setNub(1);
            operation.look(right, 100 + i);
            ma.setNub(0);
            operation.look(wrong, 200 + i);
        }
        Matrix right = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/test/a101.png");
        Matrix wrong = picture.getImageMatrixByLocal("/Users/lidapeng/Desktop/myDocment/b/b1000.png");
        //run image recognition; exporting and injecting the learning result in Accuracy_Pattern mode works the same as in SPEED mode
        //if in doubt, see the testModel() method
        operation.look(right, 2);
        operation.look(wrong, 3);
    }
}