#include "ShowDep_Color.h"
void Show::create_xy_table(const k4a_calibration_t *calibration, k4a_image_t xy_table)
k4a_float2_t *table_data = (k4a_float2_t *)(void *)k4a_image_get_buffer(xy_table);
int width = calibration->depth_camera_calibration.resolution_width;
int height = calibration->depth_camera_calibration.resolution_height;
k4a_float2_t p;
k4a_float3_t ray;
int valid;
for (int y = 0, idx = 0; y < height; y++)
p.xy.y = (float)y;
for (int x = 0; x < width; x++, idx++)
p.xy.x = (float)x;
k4a_calibration_2d_to_3d(
calibration, &p, 1.f, K4A_CALIBRATION_TYPE_DEPTH, K4A_CALIBRATION_TYPE_DEPTH, &ray, &valid);
if (valid)
table_data[idx].xy.x = ray.xyz.x;
table_data[idx].xy.y = ray.xyz.y;
table_data[idx].xy.x = nanf("");
table_data[idx].xy.y = nanf("");
// Convert a depth frame into a metric point cloud using the precomputed
// xy_table (see create_xy_table): point = (x_ray * d, y_ray * d, d), with d
// in the depth image's native units.
//
// depth_image: 16-bit depth frame (C++ wrapper image).
// xy_table:    per-pixel unit rays from create_xy_table.
// point_cloud: pre-allocated k4a_float3_t image, same resolution; filled in
//              place. Invalid pixels (zero depth or NaN ray) become NaN.
// point_count: out-param, number of valid points written.
void Show::generate_point_cloud(const k4a::image depth_image, const k4a_image_t xy_table, k4a_image_t point_cloud, int *point_count)
{
    int width = depth_image.get_width_pixels();
    int height = depth_image.get_height_pixels();

    uint16_t *depth_data = (uint16_t *)(void *)depth_image.get_buffer();
    k4a_float2_t *xy_table_data = (k4a_float2_t *)(void *)k4a_image_get_buffer(xy_table);
    k4a_float3_t *point_cloud_data = (k4a_float3_t *)(void *)k4a_image_get_buffer(point_cloud);

    *point_count = 0;
    for (int i = 0; i < width * height; i++)
    {
        if (depth_data[i] != 0 && !isnan(xy_table_data[i].xy.x) && !isnan(xy_table_data[i].xy.y))
        {
            point_cloud_data[i].xyz.x = xy_table_data[i].xy.x * (float)depth_data[i];
            point_cloud_data[i].xyz.y = xy_table_data[i].xy.y * (float)depth_data[i];
            point_cloud_data[i].xyz.z = (float)depth_data[i];
            (*point_count)++;
        }
        else
        {
            // NaN sentinel so write_point_cloud() can skip invalid pixels.
            point_cloud_data[i].xyz.x = nanf("");
            point_cloud_data[i].xyz.y = nanf("");
            point_cloud_data[i].xyz.z = nanf("");
        }
    }
}
void Show::write_point_cloud(const char *file_name, const k4a_image_t point_cloud, int point_count)
int width = k4a_image_get_width_pixels(point_cloud);
int height = k4a_image_get_height_pixels(point_cloud);
k4a_float3_t *point_cloud_data = (k4a_float3_t *)(void *)k4a_image_get_buffer(point_cloud);
std::ofstream ofs(file_name);
ofs << "ply" << std::endl;
ofs << "format ascii 1.0" << std::endl;
ofs << "element vertex"
<< " " << point_count << std::endl;
ofs << "property float x" << std::endl;
ofs << "property float y" << std::endl;
ofs << "property float z" << std::endl;
ofs << "end_header" << std::endl;
ofs.close();
std::stringstream ss;
for (int i = 0; i < width * height; i++)
if (isnan(point_cloud_data[i].xyz.x) || isnan(point_cloud_data[i].xyz.y) || isnan(point_cloud_data[i].xyz.z))
continue;
ss << (float)point_cloud_data[i].xyz.x << " " << (float)point_cloud_data[i].xyz.y << " "
<< (float)point_cloud_data[i].xyz.z << std::endl;
std::ofstream ofs_text(file_name, std::ios::out | std::ios::app);
ofs_text.write(ss.str().c_str(), (std::streamsize)ss.str().length());
void Show::ShowDep_Color(){
const uint32_t deviceCount = k4a::device::get_installed_count();
if (deviceCount == 0)
cout << "未连接任何Kinect设备!" << endl;
k4a_device_configuration_t config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
config.camera_fps = K4A_FRAMES_PER_SECOND_30;
config.depth_mode = K4A_DEPTH_MODE_NFOV_UNBINNED;
config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
config.color_resolution = K4A_COLOR_RESOLUTION_720P;
config.synchronized_images_only = true;
cout << "Started opening K4A device..." << endl;
k4a::device device = k4a::device::open(K4A_DEVICE_DEFAULT);
device.start_cameras(&config);
cout << "Finished opening K4A device." << endl;
std::vector<Pixel> depthTextureBuffer;
std::vector<Pixel> irTextureBuffer;
uint8_t *colorTextureBuffer;
ofstream rgb_out;
ofstream d_out;
ofstream ir_out;
rgb_out.open("./rgb.txt");
d_out.open("./depth.txt");
ir_out.open("./ir.txt");
rgb_out << "# color images" << endl;
rgb_out << "# file: rgbd_dataset" << endl;
rgb_out << "# timestamp" << " " << "filename" << endl;
d_out << "# depth images" << endl;
d_out << "# file: rgbd_dataset" << endl;
d_out << "# timestamp" << " " << "filename" << endl;
ir_out << "# ir images" << endl;
ir_out << "# file: rgbd_dataset" << endl;
ir_out << "# timestamp" << " " << "filename" << endl;
rgb_out << flush;
d_out << flush;
k4a::image depthImage;
k4a::image rgbImage;
k4a::image irImage;
k4a::image transformed_depthImage;
cv::Mat cv_rgbImage_with_alpha;
cv::Mat cv_rgbImage_no_alpha;
cv::Mat cv_rgbImage_8U;
cv::Mat cv_depth;
cv::Mat cv_depth_8U;
cv::Mat cv_irImage;
cv::Mat cv_irImage_8U;
k4a::capture capture;
cv::Mat depthFrame;
cv::Mat rgbFrame;
cv::Mat irFrame;
int ch;
检索并保存 Azure Kinect 图像数据
k4a_device_t device_Record = NULL;
while (1) {
if (device.get_capture(&capture, std::chrono::milliseconds(0)))
depthImage = capture.get_depth_image();
rgbImage = capture.get_color_image();
irImage = capture.get_ir_image();
ColorizeDepthImage(depthImage, DepthPixelColorizer::ColorizeBlueToRed, GetDepthModeRange(config.depth_mode), &depthTextureBuffer);
ColorizeDepthImage(irImage, DepthPixelColorizer::ColorizeGreyscale, GetIrLevels(K4A_DEPTH_MODE_PASSIVE_IR), &irTextureBuffer);
colorTextureBuffer = rgbImage.get_buffer();
depthFrame = cv::Mat(depthImage.get_height_pixels(), depthImage.get_width_pixels(), CV_8UC4, depthTextureBuffer.data());
rgbFrame = cv::Mat(rgbImage.get_height_pixels(), rgbImage.get_width_pixels(), CV_8UC4, colorTextureBuffer);
irFrame = cv::Mat(irImage.get_height_pixels(), irImage.get_width_pixels(), CV_8UC4, irTextureBuffer.data());
depthFrame.convertTo(cv_depth_8U, CV_8U, 1);
rgbFrame.convertTo(cv_rgbImage_8U, CV_8U, 1);
irFrame.convertTo(cv_irImage_8U, CV_8U, 1);
cv::imshow("Kinect color frame", rgbFrame);
if (_kbhit()) {
imshow("Kinect color frame", rgbFrame);
ch = _getch();
cout << "-----采集成功!-----" << endl;
cout << " " << endl;
if (ch == 13)
k4a::calibration k4aCalibration = device.get_calibration(config.depth_mode, config.color_resolution);
k4a::transformation k4aTransformation = k4a::transformation(k4aCalibration);
transformed_depthImage = k4aTransformation.depth_image_to_color_camera(depthImage);
cv_rgbImage_with_alpha = cv::Mat(rgbImage.get_height_pixels(), rgbImage.get_width_pixels(), CV_8UC4,
(void *)rgbImage.get_buffer());
cv::cvtColor(cv_rgbImage_with_alpha, cv_rgbImage_no_alpha, cv::COLOR_BGRA2BGR);
cv_depth = cv::Mat(transformed_depthImage.get_height_pixels(), transformed_depthImage.get_width_pixels(), CV_16U,
(void *)transformed_depthImage.get_buffer(), static_cast<size_t>(transformed_depthImage.get_stride_bytes()));
normalize(cv_depth, cv_depth_8U, 0, 256 * 256, NORM_MINMAX);
cv_depth_8U.convertTo(cv_depth, CV_8U, 1);
cv_irImage = cv::Mat(irImage.get_height_pixels(), irImage.get_width_pixels(), CV_16U,
(void *)irImage.get_buffer(), static_cast<size_t>(irImage.get_stride_bytes()));
normalize(cv_irImage, cv_irImage_8U, 0, 256 * 256, NORM_MINMAX);
cv_irImage.convertTo(cv_irImage_8U, CV_8U, 1);
double time_rgb = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(
rgbImage.get_device_timestamp()).count());
std::string filename_rgb = std::to_string(time_rgb / 1000000) + ".png";
double time_d = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(
depthImage.get_device_timestamp()).count());
std::string filename_d = std::to_string(time_d / 1000000) + ".png";
double time_ir = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(
irImage.get_device_timestamp()).count());
std::string filename_ir = std::to_string(time_ir / 1000000) + ".png";
imwrite("./RGB/" + filename_rgb, cv_rgbImage_8U);
imwrite("./Depth/" + filename_d, cv_depth_8U);
imwrite("./Ir/" + filename_ir, cv_irImage_8U);
k4a_image_t xy_table = NULL;
k4a_image_t point_cloud = NULL;
int point_count = 0;
double time_point = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(
rgbImage.get_device_timestamp()).count());
std::string filename_point = "ply/" + std::to_string(time_point / 1000000) + ".ply";
k4a_image_create(K4A_IMAGE_FORMAT_CUSTOM,
k4aCalibration.depth_camera_calibration.resolution_width,
k4aCalibration.depth_camera_calibration.resolution_height,
k4aCalibration.depth_camera_calibration.resolution_width * (int)sizeof(k4a_float2_t),
&xy_table);
create_xy_table(&k4aCalibration, xy_table);
k4a_image_create(K4A_IMAGE_FORMAT_CUSTOM,
k4aCalibration.depth_camera_calibration.resolution_width,
k4aCalibration.depth_camera_calibration.resolution_height,
k4aCalibration.depth_camera_calibration.resolution_width * (int)sizeof(k4a_float3_t),
&point_cloud);
if (depthImage == 0)
printf("Failed to get depth image from capture\n");
generate_point_cloud(depthImage, xy_table, point_cloud, &point_count);
write_point_cloud(filename_point.c_str(), point_cloud, point_count);
k4a_image_release(xy_table);
k4a_image_release(point_cloud);
std::cout << "Acquiring!" << endl;
rgb_out << std::to_string(time_rgb / 1000000) << " " << "rgb/" << filename_rgb << endl;
d_out << std::to_string(time_d / 1000000) << " " << "depth/" << filename_d << endl;
ir_out << std::to_string(time_ir / 1000000) << " " << "ir/" << filename_ir << endl;
imshow("Kinect color frame", rgbFrame);
if (waitKey(30) == 27 || waitKey(30) == 'q')
device.close();
std::cout << "----------------------------------" << std::endl;
std::cout << "------------- closed -------------" << std::endl;
std::cout << "----------------------------------" << std::endl;
break;
基于OpenCV和Kinect SDK实现Azure Kinect DK实时获取RGB、深度图像及红外图像,并通过交互式数据采集将图片数据保存到本地。目录:1. 采集效果图;2. 深度图计算原理;3. 代码主要函数及其功能(3.1 配置并启动Kinect设备,3.2 声明数据结构,3.3 创建txt文件流,3.4 循环获取每一帧的图像,3.5 数据格式转换,3.6 通过捕获键盘进行交互式数据采集);4. 完整代码(4.1 头文件)
该存储库中的部分创意来自以下存储库:
:Kinect Azure SDK的一个非常漂亮且干净的Python3包装器。
:像此存储库中一样,使用ctypes的库更加完整,但是缺少有关如何使用该库的示例,并且该库更难使用。
该存储库的目的是通过创建一个易于使用的库来结合两个存储库的优点,该库允许使用Kinect Azure的大多数功能。 另外,创建示例程序以展示库的用途
:构建此库所需。 要使用SDK,请参阅的安装说明。
ctypes :读取库所需。
numpy :矩阵计算所需
opencv-python :图像转换和可视化所需。
如何使用这个库
该库已在Windows 10和Ubuntu 20.04上使用Kinect Azure SDK 1.4.0
前提:官方apt安装Azure Kinect 传感器 SDK
代码AcquiringImages将原始深度图转换到RGB摄像头坐标系下,得到的配准后的深度图,并将转换后的depth图,原始RGB图、原始IR图保存在本地,采集格式仿照TUM数据集,图片的命名格式为时间戳+.png后缀。
在这里插入代码片
安装Kinect SDK,并根据需要更新设备固件版本:
设备固件版本必须至少为1.6.110079014或更高。
如果在Linux上,请确保相关的k4a.so和k4abt.so动态库在您的路径中。
目前只包装了人体追踪器,但添加其他包装器应该很容易,PR也将不胜感激!
有一个非常简单的示例,您可以通过以下方式运行:
py -3 example/simple_sample.py
我们正在使用它为开发一个Azure Kinect插件,您可以在此处看到非常早的演示:
该插件可供早期访问,并将很快开源,请发送电子邮件至以获得早期访问。
不要在Windows应用商店中使用Python
如果出现错误:
[2021-01-18 14:05:28.307] [error] [t=6336] [K4ABT] D:\a\1\s\src\TrackerHost\Track
开发环境 vs2010+OPENCV2.4.10
首先,下载最新的Kinect 2 SDK http://www.microsoft.com/en-us/kinectforwindows/develop/downloads-docs.aspx
下载之后不要插入Kinect,最好也不用插入除了键盘鼠标以外的其它USB设备,然后安装SDK,安装完成之后插入Kinect,会有安装新设备的提示。安装完成之后可以去“开始”那里找到两个新安装的软件,一个是可以显示Kinect深度图,另外一个软件展示SDK中的各种例子程序。
进入SDK的安装目录,可以找到sample这个文件夹,里面是四种语言编写的例子,
Azure Kinect DK 文档
ROS package : Azure_Kinect_ROS_Driver
安装 Azure_Kinect_ROS_Driver
cd catkin_ws/src
git clone https://github.com/microsoft/Azure_Kinect_ROS_Driver.git
cd catkin_ws
catkin_make
修改 k4a_ros_device.h ?
路径:Azure_Kine
def bin2numpy_ex(file_path, shape):
rawImg = np.fromfile(file_path, dtype=np.uint16)
rawImg = rawImg[: shape[0]*shape[1]]
pic_gray = rawImg.re...
1.2 文件拷贝
cd /usr/lib/x86_64-linux-gnu/libk4a1.4
sudo cp libdepthengine.so.2.0 /usr/lib/x86_64-linux-gnu
1.3 下载官方SDK源码:git clone -b v1.2.0 https://gith
1. 相机驱动和SDK的准备
在Ubuntu 18.04系统下安装Azure Kinect DK 深度相机驱动和SDK,可以参考我的另一篇博客:https://blog.csdn.net/denkywu/article/details/103177559。
到这儿,基本的准备工作已经完成了:连接上相机后,打开Azure Kinect 查看器(运行 sudo ./k4aviewer ),...
基于Kinect Azure的多相机数据采集(一)
Kinect Azure相机是微软近几年推出的一款RGBD相机。相比于Kinect一代和二代,Kinect Azure相机采集的图像可达更高的分辨率,且在硬件方面设置了同步接口,更方便于多相机的同步采集。
具体的功能以及SDK所提供的函数接口可参考以下官方提供的文件:
https://docs.microsoft.com/zh-cn/azure/kinect-dk/
https://microsoft.github.io/A
[code=python]
[build] Found '47' packages in 0.0 seconds.
[build] Package table is up to date.
Abandoned <<< panda_moveit_config [ Depends on unknown jobs: moveit_planners ]
Abandoned <<< moveit_tutorials [ Depends on unknown jobs: moveit_planners ]
[/code]