C# ONNX Implementation of DeDoDe Feature Matching
Introduction
GitHub: https://github.com/Parskatt/DeDoDe
DeDoDe: Detect, Don't Describe - Describe, Don't Detect, for Local Feature Matching
The DeDoDe detector learns to detect 3D consistent repeatable keypoints, which the DeDoDe descriptor learns to match. The result is a powerful decoupled local feature matcher.
Training DeDoDe
DISCLAIMER: I've (Johan) not yet tested that the training scripts here reproduce our original results. This repo is very similar to the internal training repo, but there might be bugs introduced by refactoring etc. Let me know if you face any issues reproducing our results (or if you somehow get better results :D).
See experiments for the scripts to train DeDoDe. We trained on a single A100-40GB with a batch size of 8. Note that you need to do the data prep first; see data_prep.
As usual, we require that you have the MegaDepth dataset already downloaded, and that you have the prepared scene info from DKM.
Results
Model Information
Inputs
-------------------------
name:images
tensor:Float[-1, 3, -1, -1]
---------------------------------------------------------------
Outputs
-------------------------
name:matches_A
tensor:Float[-1, -1]
name:matches_B
tensor:Float[-1, -1]
name:batch_ids
tensor:Int64[-1]
---------------------------------------------------------------
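The input and output shapes above can be read directly from the model itself. Below is a minimal sketch, assuming the model file sits at model/dedode_end2end_1024.onnx (the path used later in the demo), that prints each input and output name, element type, and dimensions with Microsoft.ML.OnnxRuntime; the -1 entries are dynamic axes (batch, image size, number of matches).

using System;
using Microsoft.ML.OnnxRuntime;

class ModelInfo
{
    static void Main()
    {
        // Path assumption: the same model file used by the demo project below
        using (var session = new InferenceSession("model/dedode_end2end_1024.onnx"))
        {
            Console.WriteLine("Inputs");
            foreach (var kv in session.InputMetadata)
                Console.WriteLine("  name: " + kv.Key + "  type: " + kv.Value.ElementType + "  dims: [" + string.Join(", ", kv.Value.Dimensions) + "]");

            Console.WriteLine("Outputs");
            foreach (var kv in session.OutputMetadata)
                Console.WriteLine("  name: " + kv.Key + "  type: " + kv.Value.ElementType + "  dims: [" + string.Join(", ", kv.Value.Dimensions) + "]");
        }
    }
}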
Project
VS2022
.NET Framework 4.8
OpenCvSharp 4.8
Microsoft.ML.OnnxRuntime 1.16.2
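If you are reproducing the project, the dependencies can be installed from the NuGet Package Manager Console roughly as follows. OpenCvSharp4 and OpenCvSharp4.runtime.win are the public package IDs for OpenCvSharp on Windows (the runtime package supplies the native OpenCV binaries); the exact OpenCvSharp 4.8.x build number is not pinned here.

Install-Package OpenCvSharp4
Install-Package OpenCvSharp4.runtime.win
Install-Package Microsoft.ML.OnnxRuntime -Version 1.16.2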
Code
using Microsoft.ML.OnnxRuntime.Tensors;
using Microsoft.ML.OnnxRuntime;
using OpenCvSharp;
using System;
using System.Collections.Generic;
using System.Windows.Forms;
using System.Linq;
using System.Drawing;

namespace Onnx_Demo
{
    public partial class frmMain : Form
    {
        public frmMain()
        {
            InitializeComponent();
        }

        string fileFilter = "*.*|*.bmp;*.jpg;*.jpeg;*.tiff;*.png";
        string image_path = "";
        string image_path2 = "";

        DateTime dt1 = DateTime.Now;
        DateTime dt2 = DateTime.Now;

        int inpWidth;
        int inpHeight;

        // ImageNet normalization constants used for preprocessing
        float[] mean = new float[] { 0.485f, 0.456f, 0.406f };
        float[] std = new float[] { 0.229f, 0.224f, 0.225f };

        Mat image;
        Mat image2;

        string model_path = "";

        SessionOptions options;
        InferenceSession onnx_session;
        Tensor<float> input_tensor;
        List<NamedOnnxValue> input_container;
        IDisposableReadOnlyCollection<DisposableNamedOnnxValue> result_infer;
        DisposableNamedOnnxValue[] results_onnxvalue;

        // Select image A
        private void button1_Click(object sender, EventArgs e)
        {
            OpenFileDialog ofd = new OpenFileDialog();
            ofd.Filter = fileFilter;
            if (ofd.ShowDialog() != DialogResult.OK) return;
            pictureBox1.Image = null;
            pictureBox2.Image = null;
            textBox1.Text = "";
            image_path = ofd.FileName;
            pictureBox1.Image = new System.Drawing.Bitmap(image_path);
            image = new Mat(image_path);
        }

        private void Form1_Load(object sender, EventArgs e)
        {
            // Create the input container
            input_container = new List<NamedOnnxValue>();

            // Configure the session and run on CPU
            options = new SessionOptions();
            options.LogSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_INFO;
            options.AppendExecutionProvider_CPU(0);

            // Create the inference session from the local model file
            model_path = "model/dedode_end2end_1024.onnx";
            inpHeight = 256;
            inpWidth = 256;
            onnx_session = new InferenceSession(model_path, options);

            // Default test images
            image_path = "test_img/im_A.jpg";
            pictureBox1.Image = new Bitmap(image_path);
            image_path2 = "test_img/im_B.jpg";
            pictureBox3.Image = new Bitmap(image_path2);
        }

        // Run detection and matching
        private unsafe void button2_Click(object sender, EventArgs e)
        {
            if (image_path == "")
            {
                return;
            }
            textBox1.Text = "Detecting, please wait...";
            pictureBox2.Image = null;
            System.Windows.Forms.Application.DoEvents();

            image = new Mat(image_path);
            image2 = new Mat(image_path2);

            // Both images go into one batch of shape [2, 3, inpHeight, inpWidth]
            float[] input_tensor_data = new float[2 * 3 * inpWidth * inpHeight];

            // Preprocess image A: BGR -> RGB, resize, normalize with (x / 255 - mean) / std
            Mat dstimg = new Mat();
            Cv2.CvtColor(image, dstimg, ColorConversionCodes.BGR2RGB);
            Cv2.Resize(dstimg, dstimg, new OpenCvSharp.Size(inpWidth, inpHeight));
            for (int c = 0; c < 3; c++)
            {
                for (int i = 0; i < inpHeight; i++)
                {
                    for (int j = 0; j < inpWidth; j++)
                    {
                        float pix = ((byte*)(dstimg.Ptr(i).ToPointer()))[j * 3 + c];
                        input_tensor_data[c * inpWidth * inpHeight + i * inpWidth + j] = (float)((pix / 255.0 - mean[c]) / std[c]);
                    }
                }
            }

            // Preprocess image B into the second batch slot
            Cv2.CvtColor(image2, dstimg, ColorConversionCodes.BGR2RGB);
            Cv2.Resize(dstimg, dstimg, new OpenCvSharp.Size(inpWidth, inpHeight));
            for (int c = 0; c < 3; c++)
            {
                for (int i = 0; i < inpHeight; i++)
                {
                    for (int j = 0; j < inpWidth; j++)
                    {
                        float pix = ((byte*)(dstimg.Ptr(i).ToPointer()))[j * 3 + c];
                        input_tensor_data[(3 + c) * inpWidth * inpHeight + i * inpWidth + j] = (float)((pix / 255.0 - mean[c]) / std[c]);
                    }
                }
            }

            input_tensor = new DenseTensor<float>(input_tensor_data, new[] { 2, 3, inpHeight, inpWidth });

            // Put input_tensor into the input container under the model's input name
            input_container.Clear();
            input_container.Add(NamedOnnxValue.CreateFromTensor("images", input_tensor));

            dt1 = DateTime.Now;
            // Run inference and get the results
            result_infer = onnx_session.Run(input_container);
            dt2 = DateTime.Now;

            // Postprocessing: convert the outputs to a DisposableNamedOnnxValue array
            results_onnxvalue = result_infer.ToArray();

            float[] matches_A = results_onnxvalue[0].AsTensor<float>().ToArray();
            float[] matches_B = results_onnxvalue[1].AsTensor<float>().ToArray();

            // matches_A / matches_B hold (x, y) pairs normalized to [-1, 1];
            // map them back to pixel coordinates of the original images
            int num_points = results_onnxvalue[0].AsTensor<float>().Dimensions[0];
            List<KeyPoint> points_A = new List<KeyPoint>();
            List<KeyPoint> points_B = new List<KeyPoint>();
            KeyPoint temp;
            for (int i = 0; i < num_points; i++)
            {
                temp = new KeyPoint();
                temp.Pt.X = (float)((matches_A[i * 2] + 1) * 0.5 * image.Cols);
                temp.Pt.Y = (float)((matches_A[i * 2 + 1] + 1) * 0.5 * image.Rows);
                temp.Size = 1f;
                points_A.Add(temp);
            }

            num_points = results_onnxvalue[1].AsTensor<float>().Dimensions[0];
            for (int i = 0; i < num_points; i++)
            {
                temp = new KeyPoint();
                temp.Pt.X = (float)((matches_B[i * 2] + 1) * 0.5 * image2.Cols);
                temp.Pt.Y = (float)((matches_B[i * 2 + 1] + 1) * 0.5 * image2.Rows);
                temp.Size = 1f;
                points_B.Add(temp);
            }

            // The i-th keypoint of A corresponds to the i-th keypoint of B,
            // so the match list is simply (i, i)
            num_points = points_A.Count();
            List<DMatch> matches = new List<DMatch>();
            for (int i = 0; i < num_points; i++)
            {
                matches.Add(new DMatch(i, i, 0f));
            }

            // Draw the matched pairs side by side into match_img
            Mat match_img = new Mat();
            Cv2.DrawMatches(image, points_A, image2, points_B, matches, match_img);

            pictureBox2.Image = new System.Drawing.Bitmap(match_img.ToMemoryStream());
            textBox1.Text = "Inference time: " + (dt2 - dt1).TotalMilliseconds + "ms";
        }

        private void pictureBox2_DoubleClick(object sender, EventArgs e)
        {
            Common.ShowNormalImg(pictureBox2.Image);
        }

        // Select image B
        private void button3_Click(object sender, EventArgs e)
        {
            OpenFileDialog ofd = new OpenFileDialog();
            ofd.Filter = fileFilter;
            if (ofd.ShowDialog() != DialogResult.OK) return;
            pictureBox3.Image = null;
            pictureBox2.Image = null;
            textBox1.Text = "";
            image_path2 = ofd.FileName;
            pictureBox3.Image = new System.Drawing.Bitmap(image_path2);
            image2 = new Mat(image_path2);
        }

        private void pictureBox3_DoubleClick(object sender, EventArgs e)
        {
            Common.ShowNormalImg(pictureBox3.Image);
        }

        private void pictureBox1_DoubleClick(object sender, EventArgs e)
        {
            Common.ShowNormalImg(pictureBox1.Image);
        }
    }
}
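One detail worth making explicit from the post-processing above: matches_A and matches_B come back in normalized coordinates in [-1, 1], so each value is mapped to pixel coordinates of its original image with (v + 1) * 0.5 * size. The helper below is a sketch of that mapping (the class and method names are illustrative, not part of the demo):

using OpenCvSharp;

static class MatchMapping
{
    // Converts one normalized (x, y) pair in [-1, 1] to a KeyPoint in the
    // pixel coordinates of an image that is 'cols' wide and 'rows' high.
    public static KeyPoint ToKeyPoint(float nx, float ny, int cols, int rows)
    {
        float px = (nx + 1f) * 0.5f * cols;
        float py = (ny + 1f) * 0.5f * rows;
        return new KeyPoint(px, py, 1f);
    }
}

This is the same arithmetic done inline in button2_Click; note that it is applied against the original image dimensions rather than the 256x256 network input, which works because the normalized coordinates are relative to the image extent.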
That concludes this article on implementing DeDoDe feature matching in C# with ONNX.