Skip to content
Snippets Groups Projects
Commit 3977997e authored by Ezekiel Dohmen's avatar Ezekiel Dohmen
Browse files

Adding message support to daemon, and small kernel side test

parent 7e30eec2
No related branches found
No related tags found
3 merge requests!439RCG 5.0 release fro deb 10,!415Cdsrfm to use new dolphin interface,!411Userspace dolphin daemon, netlink dolphin setup for real time models
INCS:= -Iinclude/ -I/opt/DIS/include/ -I/opt/DIS/include/dis INCS:= -Iinclude/ -I../include/ -I/opt/DIS/include/ -I/opt/DIS/include/dis
default: default:
$(CXX) src/main.cpp src/DolphinNetlinkServer.cpp $(INCS) -lpthread -lfmt $(CXX) -g src/main.cpp src/DolphinNetlinkServer.cpp src/Dolphin_SISCI_Resource.cpp -o main $(INCS) -L/opt/DIS/lib64 -l:libsisci.a -lpthread -lfmt
clean:
rm -f main
...@@ -3,11 +3,13 @@ ...@@ -3,11 +3,13 @@
#include "LIGO_Thread.hpp" #include "LIGO_Thread.hpp"
#include "Dolphin_SISCI_Resource.hpp" #include "Dolphin_SISCI_Resource.hpp"
#include "daemon_messages.h"
#include <linux/netlink.h> #include <linux/netlink.h>
#include <sys/socket.h> #include <sys/socket.h>
#include <memory> #include <memory>
#include <map>
class DolphinNetlinkServer : public LIGO_Thread class DolphinNetlinkServer : public LIGO_Thread
...@@ -22,20 +24,25 @@ class DolphinNetlinkServer : public LIGO_Thread ...@@ -22,20 +24,25 @@ class DolphinNetlinkServer : public LIGO_Thread
//Initialization Helpers //Initialization Helpers
bool init_sockets(); bool init_sockets();
bool init_dolphin();
//Message Handlers //Message Handlers
void handle_free_req( void * ); void handle_free_all_req( dolphin_mc_free_all_req * );
void handle_alloc_req( void * ); void handle_alloc_req( dolphin_mc_alloc_req * );
private: private:
DolphinNetlinkServer(); DolphinNetlinkServer();
void build_and_send_alloc_resp_error(DOLPHIN_ERROR_CODES status);
void send_netlink_message(void * msg_ptr, unsigned sz_bytes);
//Netlink Socket Info
struct sockaddr_nl _src_addr; struct sockaddr_nl _src_addr;
int _sock_fd; int _sock_fd;
//Dolphin Storage
std::map< unsigned, std::unique_ptr< Dolphin_SISCI_Resource > > _dolphin_resources;
}; };
......
...@@ -3,16 +3,10 @@ ...@@ -3,16 +3,10 @@
#include "daemon_messages.h" #include "daemon_messages.h"
//libspdlog-dev
#include "spdlog/spdlog.h"
//ligo-dolphin-srcdis //ligo-dolphin-srcdis
#include "sisci_api.h" #include "sisci_api.h"
//Dolphin helper defines #include <memory>
#define NO_CALLBACK NULL
#define NO_FLAGS 0
#include <mutex> #include <mutex>
...@@ -20,196 +14,43 @@ class Dolphin_SISCI_Resource ...@@ -20,196 +14,43 @@ class Dolphin_SISCI_Resource
{ {
public: public:
std::unique_ptr< Dolphin_SISCI_Resource > create_instance() static std::unique_ptr< Dolphin_SISCI_Resource > create_instance();
{ virtual ~Dolphin_SISCI_Resource();
static sci_error_t error;
std::unique_ptr< Dolphin_SISCI_Resource > me_ptr (new Dolphin_SISCI_Resource()); DOLPHIN_ERROR_CODES connect_mc_segment(unsigned segment_id, unsigned segment_size);
int get_segment_id();
std::lock_guard<std::mutex> guard( Dolphin_SISCI_Resource::_data_mutex );
if( _usage_count == 0 ) volatile void * kernel_read_addr();
{ volatile void * kernel_write_addr();
// Initialize the SISCI library
SCIInitialize(NO_FLAGS, &error);
if (error != SCI_ERR_OK) {
spdlog::error("Dolphin_SISCI_Resource::init_dolphin() - SCIInitialize failed - Error code: {x}", error);
return nullptr;
}
// Get local nodeId
SCIGetLocalNodeId(_local_adapter_num,
&_local_node_id,
NO_FLAGS,
&error);
if (error != SCI_ERR_OK) {
spdlog::error("Dolphin_SISCI_Resource::create_instance() - Could not find the local adapter {}", _local_adapter_num);
SCITerminate();
return nullptr;
}
++_usage_count;
}
// Open a virtual dev descriptor
SCIOpen(&_v_dev, NO_FLAGS, &error);
if (error != SCI_ERR_OK) {
if (error == SCI_ERR_INCONSISTENT_VERSIONS) {
spdlog::error("Dolphin_SISCI_Resource::init_dolphin() - Version mismatch between SISCI user library and SISCI driver");
}
spdlog::error("Dolphin_SISCI_Resource::init_dolphin() - SCIOpen failed - Error code {x}", error);
return nullptr;
}
return me_ptr;
}
virtual ~Dolphin_SISCI_Resource()
{
static sci_error_t error;
std::lock_guard<std::mutex> guard( Dolphin_SISCI_Resource::_data_mutex );
clean_up_segment();
// Close the file descriptor
SCIClose(_v_dev, NO_FLAGS, &error);
--_usage_count;
//If this is the last usage, clean up library
if ( _usage_count == 0)
{
SCITerminate();
}
}
DOLPHIN_ERROR_CODES connect_mc_segment(unsigned segment_id, unsigned segment_size)
{
static sci_error_t error;
if ( !supports_multicast() )
{
spdlog::error("Dolphin_SISCI_Resource::connect_mc_segment() - Called, but dolphin network does not support multicast.");
return DOLPHIN_ERROR_NO_MCAST_SUPPORT;
}
// Create local reflective memory segment
SCICreateSegment(_v_dev, &_local_segment, segment_id, segment_size, NO_CALLBACK, NULL, SCI_FLAG_BROADCAST, &error);
if (error == SCI_ERR_OK) {
spdlog::info("Dolphin_SISCI_Resource::connect_mc_segment() - Local segment (id={x}, size={}) is created.", segment_id, segment_size);
} else {
spdlog::error("Dolphin_SISCI_Resource::connect_mc_segment() - SCICreateSegment failed - Error code {x}", error);
return DOLPHIN_ERROR_SEGMENT_SETUP_ERROR;
}
// Prepare the segment
SCIPrepareSegment(_local_segment, _local_adapter_num, SCI_FLAG_BROADCAST, &error);
if (error == SCI_ERR_OK) {
spdlog::info("Dolphin_SISCI_Resource::connect_mc_segment() - Local segment (id={x}, size={}) is prepared.", segment_id, segment_size);
} else {
spdlog::error("Dolphin_SISCI_Resource::connect_mc_segment() - SCIPrepareSegment failed: {} {x}.", SCIGetErrorString(error), error);
return DOLPHIN_ERROR_SEGMENT_SETUP_ERROR;
}
// Set the segment available
SCISetSegmentAvailable(_local_segment, _local_adapter_num, NO_FLAGS, &error);
if (error != SCI_ERR_OK) {
spdlog::error("Dolphin_SISCI_Resource::connect_mc_segment() - SCISetSegmentAvailable failed: {} ({})",SCIGetErrorString(error), error);
return DOLPHIN_ERROR_SEGMENT_SETUP_ERROR;
}
//Store the kernel side read address
_read_addr = get_kernel_read_addr();
//Not mapping the segment into userspace (for now)
SCIConnectSegment(_v_dev, &_remote_segment, DIS_BROADCAST_NODEID_GROUP_ALL, segment_id, _local_adapter_num,
NO_CALLBACK, NULL, SCI_INFINITE_TIMEOUT, SCI_FLAG_BROADCAST, &error);
if (error != SCI_ERR_OK)
{
spdlog::error("Dolphin_SISCI_Resource::connect_mc_segment() - SCIConnectSegment, failed to connect for write addr: {} {x}",
SCIGetErrorString(error), error);
return DOLPHIN_ERROR_SEGMENT_SETUP_ERROR;
}
//TODO: Figure out how to get write kernel address
mc_segment_initialized = true;
return DOLPHIN_ERROR_OK;
}
volatile void * kernel_read_addr() { return _read_addr; }
volatile void * kernel_write_addr() { return _write_addr; }
private: private:
//Static members //Static members
static unsigned _usage_count; static unsigned _usage_count;
static std::mutex _data_mutex; static std::mutex _data_mutex;
static const unsigned _local_adapter_num = 0; static const unsigned _local_adapter_num;
static unsigned _local_node_id; //Filled when we init the lib static unsigned _local_node_id; //Filled when we init the lib
//Instance members //Instance members
sci_desc_t _v_dev; sci_desc_t _v_dev;
sci_local_segment_t _local_segment; unsigned _segment_id;
sci_remote_segment_t _remote_segment; sci_local_segment_t _local_segment;
sci_map_t _local_map; sci_remote_segment_t _remote_segment;
sci_map_t _remote_map; sci_map_t _local_map;
volatile void * _read_addr = 0; sci_map_t _remote_map;
volatile void * _write_addr = 0; volatile void * _read_addr = 0;
volatile void * _write_addr = 0;
bool mc_segment_initialized = false;
bool mc_segment_initialized = false;
//Private member functions
bool supports_multicast() //Private member functions
{ bool supports_multicast();
sci_query_adapter_t queryAdapter = {0};
sci_error_t error; volatile void * get_kernel_read_addr();
unsigned int mcast_max_groups = 0;
void clean_up_segment();
queryAdapter.localAdapterNo = _local_adapter_num;
queryAdapter.subcommand = SCI_Q_ADAPTER_MCAST_MAX_GROUPS;
queryAdapter.data = &mcast_max_groups;
SCIQuery(SCI_Q_ADAPTER, &queryAdapter, NO_FLAGS, &error);
// Multicast support is present if the number of multicast groups is not zero
if (mcast_max_groups != 0) return true;
else return false;
}
volatile void * get_kernel_read_addr()
{
sci_error_t error;
sci_query_local_segment_t qlseg;
qlseg.subcommand = SCI_Q_LOCAL_SEGMENT_VIRTUAL_KERNEL_ADDR;
qlseg.segment=_local_segment;
SCIQuery(SCI_Q_LOCAL_SEGMENT, &qlseg, NO_FLAGS, &error);
if (error != SCI_ERR_OK) return nullptr;
return (volatile void *)qlseg.data.ioaddr;
}
void clean_up_segment()
{
sci_error_t error;
if (mc_segment_initialized)
{
SCIDisconnectSegment(_remote_segment, NO_FLAGS, &error);
SCISetSegmentUnavailable(_local_segment, _local_adapter_num, NO_FLAGS, &error);
SCIRemoveSegment(_local_segment, NO_FLAGS, &error);
}
}
}; };
......
#ifndef LIGO_DOLPHIN_DAEMON_MESSAGES #ifndef LIGO_DOLPHIN_DAEMON_MESSAGES
#define LIGO_DOLPHIN_DAEMON_MESSAGES #define LIGO_DOLPHIN_DAEMON_MESSAGES
#include <stdint.h> //#include <stdint.h>
#include "util/fixed_width_types.h"
enum DOLPHIN_DAEMON_MSGS enum DOLPHIN_DAEMON_MSGS
{ {
...@@ -15,8 +16,8 @@ enum DOLPHIN_ERROR_CODES ...@@ -15,8 +16,8 @@ enum DOLPHIN_ERROR_CODES
DOLPHIN_ERROR_OK = 0, DOLPHIN_ERROR_OK = 0,
DOLPHIN_ERROR_NO_LIBRARY_SUPPORT = 1, DOLPHIN_ERROR_NO_LIBRARY_SUPPORT = 1,
DOLPHIN_ERROR_NO_MCAST_SUPPORT = 2, DOLPHIN_ERROR_NO_MCAST_SUPPORT = 2,
DOLPHIN_ERROR_SEGMENT_SETUP_ERROR = 3 DOLPHIN_ERROR_SEGMENT_SETUP_ERROR = 3,
DOLPHIN_ERROR_SEGMENT_ALREADY_ALLOCATED = 4,
}; };
typedef struct dolphin_mc_header typedef struct dolphin_mc_header
...@@ -28,6 +29,7 @@ typedef struct dolphin_mc_alloc_req ...@@ -28,6 +29,7 @@ typedef struct dolphin_mc_alloc_req
{ {
dolphin_mc_header header; dolphin_mc_header header;
uint32_t segment_sz_bytes;
uint32_t num_segments; uint32_t num_segments;
uint32_t segment_ids[]; uint32_t segment_ids[];
} dolphin_mc_alloc_req; } dolphin_mc_alloc_req;
...@@ -35,16 +37,17 @@ typedef struct dolphin_mc_alloc_req ...@@ -35,16 +37,17 @@ typedef struct dolphin_mc_alloc_req
typedef struct dolphin_mc_alloc_resp typedef struct dolphin_mc_alloc_resp
{ {
dolphin_mc_header header; dolphin_mc_header header;
enum DOLPHIN_ERROR_CODES status;
uint32_t num_addrs; uint32_t num_addrs;
//Each dolphin multicast segment has a read and write address //Each dolphin multicast segment has a read and write address
//so each requested segment will have two addresses returned //so each requested segment will have two addresses returned
//in this message. Segments are returned in the order they are requested //in this message. Segments are returned in the order they are requested
//and in the order read address, write address //and in the order read address, write address
void * addrs[]; volatile void * addrs[];
} dolphin_mc_alloc_resp; } dolphin_mc_alloc_resp;
typedef struct dolphin_mc_free_req typedef struct dolphin_mc_free_all_req
{ {
dolphin_mc_header header; dolphin_mc_header header;
} dolphin_mc_free_req; } dolphin_mc_free_req;
......
# Build the netlink_test kernel module plus the small userspace receiver (recv).
# mkfile_path/mkfile_dir: absolute location of this Makefile, so the kernel
# build system (which runs from the kernel tree) can still resolve our -I paths.
mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST)))
mkfile_dir := $(dir $(mkfile_path))
# NOTE(review): KERNEL_DIR is defined but never used below — the kbuild
# invocations use /lib/modules/$(uname -r)/build instead. Confirm intent.
KERNEL_DIR=/usr/src/kernel-headers-$(shell uname -r)
# Out-of-tree kernel object to build.
obj-m += netlink_test.o
# gnu99 + declaration-after-statement waiver so the module can share
# daemon_messages.h with the userspace daemon; -I paths locate those headers.
ccflags-y := -std=gnu99 -Wno-declaration-after-statement -I$(mkfile_dir)/../include/ -I$(mkfile_dir)/../../include/
all:
	$(CC) -Wall recv.c -o recv
	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
clean:
	rm -f recv
	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
#include <net/sock.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/kthread.h> // for threads
#include <linux/delay.h>   // msleep()
#include <linux/err.h>     // IS_ERR() for kthread_run() failure checks
#include "daemon_messages.h"
#define MYPROTO NETLINK_USERSOCK
#define MYGRP 17
struct sock *nl_sock = NULL;
static struct task_struct *g_thread = 0;
static char thread_name[] = {"test thread"};
static void netlink_test_recv_msg(struct sk_buff *skb)
{
struct sk_buff *skb_out;
struct nlmsghdr *nlh;
int msg_size;
char *msg;
int pid;
int res;
nlh = (struct nlmsghdr *)skb->data;
pid = nlh->nlmsg_pid; /* pid of sending process */
//msg = (char *)nlmsg_data(nlh);
//msg_size = strlen(msg);
dolphin_mc_alloc_resp * resp = (dolphin_mc_alloc_resp *) nlmsg_data(nlh);
printk(KERN_INFO "netlink_test: Received %d bytes from pid %d:\n", nlmsg_len(nlh), pid);
printk(KERN_INFO "netlink_test: msg_type %u, status %u, num_addrs %u\n", resp->header.msg_id, resp->status, resp->num_addrs);
for(int i=0; i < resp->num_addrs; i+=2)
{
printk(KERN_INFO "netlink_test: \t read addr: %p\n", resp->addrs[i]);
printk(KERN_INFO "netlink_test: \t write addr %p\n", resp->addrs[i+1]);
}
/*
// create reply
skb_out = nlmsg_new(msg_size, 0);
if (!skb_out) {
printk(KERN_ERR "netlink_test: Failed to allocate new skb\n");
return;
}
// put received message into reply
nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0);
//NETLINK_CB(skb_out).dst_group = MYGRP; / in multicast group
strncpy(nlmsg_data(nlh), msg, msg_size);
printk(KERN_INFO "netlink_test: Send %s\n", msg);
res = nlmsg_multicast(nl_sock, skb_out, 0, MYGRP, GFP_KERNEL);
if (res < 0)
printk(KERN_INFO "netlink_test: Error while sending skb to user\n");
*/
}
int multicast_thread_fn( void * pv )
{
struct sk_buff *skb_out;
struct nlmsghdr *nlh;
int res;
dolphin_mc_alloc_req * alloc_req;
unsigned num_segments = 2;
unsigned total_payload_sz = sizeof( dolphin_mc_alloc_req ) + sizeof(uint32_t) * num_segments;
//Send one request
skb_out = nlmsg_new( total_payload_sz, 0);
if (!skb_out) {
printk(KERN_ERR "netlink_test: Failed to allocate new skb\n");
while(!kthread_should_stop())
{
msleep(1000);
}
return 0;
}
//Allocate and send message
nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, total_payload_sz, 0);
//strncpy(nlmsg_data(nlh), msg, msg_size);
alloc_req = (dolphin_mc_alloc_req*) nlmsg_data(nlh);
alloc_req->header.msg_id = DOLPHIN_DAEMON_ALLOC_REQ;
alloc_req->num_segments = num_segments;
for(int i =0 ; i < num_segments; ++i)
{
alloc_req->segment_ids[i] = i;
}
res = nlmsg_multicast(nl_sock, skb_out, 0, MYGRP, GFP_KERNEL);
skb_out = NULL;
while(!kthread_should_stop())
{
msleep(1000);
}
return 0;
}
/*
void real_time_loop( void )
{
struct sk_buff *skb_out;
struct nlmsghdr *nlh;
int res;
const char * msg = "Hello from an isolated core.";
size_t msg_size = 128;
while(atomic_read(&g_atom_should_exit) == 0)
{
skb_out = nlmsg_new(1024, 0);
if (!skb_out) {
printk(KERN_ERR "netlink_test: Failed to allocate new skb\n");
msleep(1000);
continue;
}
//Allocate and send message
nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0);
strncpy(nlmsg_data(nlh), msg, msg_size);
res = nlmsg_multicast(nl_sock, skb_out, 0, MYGRP, GFP_KERNEL);
skb_out = NULL;
mdelay(250);
}
atomic_set(&g_atom_has_exited, 1);
return;
}*/
/*
 * Module init: create the NETLINK_USERSOCK kernel socket, then start the
 * test thread that multicasts an alloc request to the daemon.
 */
static int __init netlink_test_init(void)
{
    struct netlink_kernel_cfg cfg = {
        .input = netlink_test_recv_msg,
        // .groups = MYGRP
    };

    printk(KERN_INFO "netlink_test: Init module\n");

    nl_sock = netlink_kernel_create(&init_net, MYPROTO, &cfg);
    if (!nl_sock) {
        printk(KERN_ALERT "netlink_test: Error creating socket.\n");
        return -10;
    }

    g_thread = kthread_run(multicast_thread_fn, NULL, thread_name);
    /* BUG FIX: kthread_run() returns ERR_PTR() on failure, never NULL, so
     * the old `!g_thread` test could not detect a failed thread create. */
    if( IS_ERR(g_thread) )
    {
        printk(KERN_ERR " - Could not create the reader kthread.\n");
        g_thread = NULL;
        /* BUG FIX: don't leak the netlink socket on the failure path. */
        netlink_kernel_release(nl_sock);
        nl_sock = NULL;
        return -5;
    }
    return 0;
}
/* Module teardown: stop the sender thread first, then release the socket
 * it was multicasting on. */
static void __exit netlink_test_exit(void)
{
    printk(KERN_INFO "netlink_test: Exit module\n");

    /* The thread must be gone before nl_sock is released. */
    if (g_thread != NULL) {
        kthread_stop(g_thread);
    }

    netlink_kernel_release(nl_sock);
}
module_init(netlink_test_init);
module_exit(netlink_test_exit);
MODULE_LICENSE("Dual MIT/GPL");
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#define MYGRP 17
#define MAX_PAYLOAD 1024 /* maximum payload size*/
/*
 * Block on the netlink socket, receive one message, and print its payload
 * as a string.
 */
void read_event(int sock)
{
    struct sockaddr_nl nladdr;
    struct msghdr msg;
    struct iovec iov;
    char buffer[65536];
    int ret;

    /* BUG FIX: the msghdr (msg_control, msg_controllen, msg_flags) and the
     * peer address were previously left uninitialized — undefined behavior
     * inside recvmsg(). Zero everything first. */
    memset(&msg, 0, sizeof(msg));
    memset(&nladdr, 0, sizeof(nladdr));
    /* Zero the buffer so the %s print below is always NUL-terminated even
     * if the kernel payload is not. */
    memset(buffer, 0, sizeof(buffer));

    iov.iov_base = (void *) buffer;
    /* Keep one byte spare so a full read still leaves a terminating NUL. */
    iov.iov_len = sizeof(buffer) - 1;
    msg.msg_name = (void *) &(nladdr);
    msg.msg_namelen = sizeof(nladdr);
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    printf("Listen for message...\n");
    ret = recvmsg(sock, &msg, 0);
    if (ret < 0)
        return;

    /* Need at least a netlink header before NLMSG_DATA() is meaningful. */
    if ((size_t)ret < sizeof(struct nlmsghdr))
        return;

    char *payload = NLMSG_DATA((struct nlmsghdr *) buffer);
    printf("Received from kernel: %s\n", payload);
}
/*
 * Userspace listener: open a NETLINK_USERSOCK socket, join multicast group
 * MYGRP, and print every message the kernel module multicasts. Runs forever.
 */
int main(int argc, char **argv)
{
    struct sockaddr_nl src_addr;
    int sock_fd;
    int group = MYGRP;

    sock_fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_USERSOCK);
    if (sock_fd < 0) {
        printf("socket(): %s\n", strerror(errno));
        return 1;
    }

    memset(&src_addr, 0, sizeof(src_addr));
    src_addr.nl_family = AF_NETLINK;
    /* nl_pid 0: let the kernel assign a unique port id for this socket. */
    src_addr.nl_pid = 0;

    /* BUG FIX: bind() result was previously ignored; membership and recv
     * would then fail silently. */
    if (bind(sock_fd, (struct sockaddr*)&src_addr, sizeof(src_addr)) < 0) {
        printf("bind(): %s\n", strerror(errno));
        close(sock_fd);
        return 1;
    }

    if (setsockopt(sock_fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &group, sizeof(group)) < 0) {
        printf("setsockopt(NETLINK_ADD_MEMBERSHIP): %s\n", strerror(errno));
        close(sock_fd);
        return 1;
    }

    while (1) {
        read_event(sock_fd);
    }

    close(sock_fd);
    return 0;
}
#include "DolphinNetlinkServer.hpp" #include "DolphinNetlinkServer.hpp"
#include "daemon_messages.h"
#include "Dolphin_SISCI_Resource.hpp" #include "Dolphin_SISCI_Resource.hpp"
...@@ -7,6 +6,7 @@ ...@@ -7,6 +6,7 @@
#include "spdlog/spdlog.h" #include "spdlog/spdlog.h"
#include "spdlog/cfg/env.h" #include "spdlog/cfg/env.h"
#include <vector>
//Netlink Defines //Netlink Defines
#define NETLINK_GROUP_NUM 17 #define NETLINK_GROUP_NUM 17
...@@ -61,18 +61,39 @@ void DolphinNetlinkServer::thread_body() ...@@ -61,18 +61,39 @@ void DolphinNetlinkServer::thread_body()
continue; continue;
} }
void * data = NLMSG_DATA((struct nlmsghdr *) &buffer);
dolphin_mc_header * hdr = (dolphin_mc_header *) data; unsigned payload_sz = ret - NLMSG_HDRLEN ;
switch ( hdr->msg_id ) spdlog::info("Received {} bytes, payload_sz : {}", ret, payload_sz);
union msgs_t
{
dolphin_mc_header * hdr;
dolphin_mc_alloc_req * alloc_req;
dolphin_mc_free_all_req * free_req;
};
msgs_t msg;
msg.hdr = (dolphin_mc_header *) NLMSG_DATA((struct nlmsghdr *) &buffer);;
switch ( msg.hdr->msg_id )
{ {
case DOLPHIN_DAEMON_ALLOC_REQ: case DOLPHIN_DAEMON_ALLOC_REQ:
handle_alloc_req( data );
//Verify message length
if (payload_sz < (msg.alloc_req->num_segments * sizeof(uint32_t) ) + sizeof(dolphin_mc_alloc_req) )
{
spdlog::error("DolphinNetlinkServer - Invalid message size {}, for num_segments {}",
payload_sz, msg.alloc_req->num_segments);
continue;
}
handle_alloc_req( msg.alloc_req );
break; break;
case DOLPHIN_DAEMON_FREE_REQ: case DOLPHIN_DAEMON_FREE_REQ:
handle_free_req( data ); handle_free_all_req( msg.free_req );
break; break;
default: default:
spdlog::error("DolphinNetlinkServer - Invalid message id of {}, discarding message.", hdr->msg_id); spdlog::error("DolphinNetlinkServer - Invalid message id of {}, discarding message.", msg.hdr->msg_id);
continue; continue;
break; break;
...@@ -86,15 +107,135 @@ void DolphinNetlinkServer::thread_body() ...@@ -86,15 +107,135 @@ void DolphinNetlinkServer::thread_body()
} }
void DolphinNetlinkServer::handle_free_req( void * msg ) void DolphinNetlinkServer::handle_free_all_req( dolphin_mc_free_all_req * req_ptr )
{ {
dolphin_mc_free_req * req_ptr = (dolphin_mc_free_req *) msg; _dolphin_resources.clear(); //Free all dolphin resources
spdlog::info("DolphinNetlinkServer::handle_free_all_req() - Dolphin resources released.");
return;
}
//Handle a dolphin_mc_alloc_req: set up one SISCI multicast segment per
//requested id, then answer with a dolphin_mc_alloc_resp carrying the kernel
//read/write address pair for each segment. On any failure an error response
//is sent and nothing is committed to _dolphin_resources.
void DolphinNetlinkServer::handle_alloc_req( dolphin_mc_alloc_req * req_ptr )
{
    DOLPHIN_ERROR_CODES status;

    //We leave new segments in this vector until we have processed the whole message
    //if we fail along the way we return, and these resources get cleaned up
    std::vector< std::unique_ptr< Dolphin_SISCI_Resource > > new_segments;

    spdlog::info("dolphin_mc_alloc_req message received.");
    spdlog::info("There are {} num_segments requested.", req_ptr->num_segments);

    for(uint32_t i = 0; i < req_ptr->num_segments; ++i)
    {
        spdlog::info("Segment {}", req_ptr->segment_ids[i]);

        //It does not look like dolphin supports two connections to the same multicast segment
        //from a single node, so we only are going to support one segment id per front end / daemon
        auto it = _dolphin_resources.find(req_ptr->segment_ids[i]);
        if (it != _dolphin_resources.end() )
        {
            spdlog::error("DolphinNetlinkServer::handle_alloc_req() - "
                          "Segment ID {} was requested, but segment is already allocated.",
                          req_ptr->segment_ids[i]);
            return build_and_send_alloc_resp_error(DOLPHIN_ERROR_SEGMENT_ALREADY_ALLOCATED);
        }

        //Create a new dolphin resource manager for the segment
        new_segments.push_back( Dolphin_SISCI_Resource::create_instance() );
        if( !new_segments.back() ) return build_and_send_alloc_resp_error(DOLPHIN_ERROR_SEGMENT_SETUP_ERROR);

        //Setup the multicast segment
        status = new_segments.back()->connect_mc_segment(req_ptr->segment_ids[i], req_ptr->segment_sz_bytes);
        if( status != DOLPHIN_ERROR_OK ) return build_and_send_alloc_resp_error(status);
    }

    //If we got here, each of the requested segments has been accepted; add all to the map.
    //BUG FIX: the original keyed every map insert off _new_segments.back()->get_segment_id(),
    //so all segments collided on the last segment's id and earlier ones were lost.
    for (auto & seg : new_segments)
    {
        unsigned seg_id = seg->get_segment_id();
        _dolphin_resources[seg_id] = std::move(seg);
    }

    //Time to send the accepted response
    unsigned num_addrs = req_ptr->num_segments * 2; //One read and write addr for each segment
    unsigned msg_size = sizeof(dolphin_mc_alloc_resp) + (num_addrs * sizeof(void*)); //msg size is base struct plus the number of addrs we are filling
    //BUG FIX: the original malloc()ed this buffer and never freed it; use an
    //RAII buffer instead so every return path is leak-free.
    std::vector<unsigned char> resp_buf(msg_size, 0);
    dolphin_mc_alloc_resp * resp_ptr = reinterpret_cast<dolphin_mc_alloc_resp *>(resp_buf.data());

    resp_ptr->header.msg_id = DOLPHIN_DAEMON_ALLOC_RESP;
    resp_ptr->status = DOLPHIN_ERROR_OK;
    resp_ptr->num_addrs = num_addrs;

    //Addresses are packed [read, write] per segment, in request order.
    unsigned addr_count = 0;
    for(uint32_t i = 0; i < req_ptr->num_segments; ++i)
    {
        resp_ptr->addrs[addr_count]   = _dolphin_resources[ req_ptr->segment_ids[i] ]->kernel_read_addr();
        resp_ptr->addrs[addr_count+1] = _dolphin_resources[ req_ptr->segment_ids[i] ]->kernel_write_addr();
        addr_count += 2;
    }

    send_netlink_message(resp_ptr, msg_size);
    return;
}
void DolphinNetlinkServer::handle_alloc_req( void * msg ) void DolphinNetlinkServer::build_and_send_alloc_resp_error(DOLPHIN_ERROR_CODES status)
{ {
dolphin_mc_alloc_req * req_ptr = (dolphin_mc_alloc_req *) msg; //Build error response message
dolphin_mc_alloc_resp resp;
memset(&resp, 0, sizeof(resp));
resp.header.msg_id = DOLPHIN_DAEMON_ALLOC_RESP;
resp.status = status;
resp.num_addrs = 0;
//No returned segments on error, so msg is just struct size
send_netlink_message(&resp, sizeof(resp));
}
//Wrap an arbitrary payload in a netlink header and unicast it to the kernel
//(nl_pid 0) over _sock_fd.
void DolphinNetlinkServer::send_netlink_message(void * msg_ptr, unsigned sz_bytes)
{
    struct sockaddr_nl dest_addr;
    memset(&dest_addr, 0, sizeof(dest_addr));
    dest_addr.nl_family = AF_NETLINK;
    dest_addr.nl_pid = 0;    // For Linux Kernel
    dest_addr.nl_groups = 0; // unicast

    //BUG FIX: the original malloc()ed this header without a NULL check and
    //relied on a manual free(); an RAII buffer removes both hazards.
    std::vector<unsigned char> nl_buf(NLMSG_SPACE(sz_bytes), 0);
    struct nlmsghdr *nlh = reinterpret_cast<struct nlmsghdr *>(nl_buf.data());
    nlh->nlmsg_len = NLMSG_SPACE(sz_bytes);
    nlh->nlmsg_pid = getpid(); // self pid
    nlh->nlmsg_flags = 0;
    memcpy(NLMSG_DATA(nlh), msg_ptr, sz_bytes);

    struct iovec iov;
    memset(&iov, 0, sizeof(iov));
    iov.iov_base = (void *)nlh;
    iov.iov_len = nlh->nlmsg_len;

    struct msghdr msg;
    memset(&msg, 0, sizeof(msg));
    msg.msg_name = (void *)&dest_addr;
    msg.msg_namelen = sizeof(dest_addr);
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    int rc = sendmsg(_sock_fd, &msg, 0);
    if (rc < 0) {
        spdlog::error("sendmsg(): {}", strerror(errno));
        return;
    }
    spdlog::info("Sent message ret {}", rc);
}
bool DolphinNetlinkServer::init_sockets() bool DolphinNetlinkServer::init_sockets()
......
#include "Dolphin_SISCI_Resource.hpp"
//libspdlog-dev
#include "spdlog/spdlog.h"
//Dolphin helper defines
#define NO_CALLBACK NULL
#define NO_FLAGS 0
#include <mutex>
//Class-wide state shared by every Dolphin_SISCI_Resource instance.
unsigned Dolphin_SISCI_Resource::_usage_count = 0; //live instances; pairs SCIInitialize()/SCITerminate()
std::mutex Dolphin_SISCI_Resource::_data_mutex; //serializes create_instance()/destructor around the counter
const unsigned Dolphin_SISCI_Resource::_local_adapter_num = 0;
unsigned Dolphin_SISCI_Resource::_local_node_id; //filled by SCIGetLocalNodeId() on first create_instance()
std::unique_ptr< Dolphin_SISCI_Resource > Dolphin_SISCI_Resource::create_instance()
{
sci_error_t error;
std::unique_ptr< Dolphin_SISCI_Resource > me_ptr (new Dolphin_SISCI_Resource());
std::lock_guard<std::mutex> guard( Dolphin_SISCI_Resource::_data_mutex );
if( Dolphin_SISCI_Resource::_usage_count == 0 )
{
// Initialize the SISCI library
SCIInitialize(NO_FLAGS, &error);
if (error != SCI_ERR_OK) {
spdlog::error("Dolphin_SISCI_Resource::init_dolphin() - SCIInitialize failed - Error code: 0x{:x}", error);
SCITerminate();
return nullptr;
}
// Get local nodeId
SCIGetLocalNodeId(_local_adapter_num,
&_local_node_id,
NO_FLAGS,
&error);
if (error != SCI_ERR_OK) {
spdlog::error("Dolphin_SISCI_Resource::create_instance() - Could not find the local adapter {}", _local_adapter_num);
SCITerminate();
return nullptr;
}
}
++Dolphin_SISCI_Resource::_usage_count;
// Open a virtual dev descriptor
SCIOpen(&me_ptr->_v_dev, NO_FLAGS, &error);
if (error != SCI_ERR_OK) {
if (error == SCI_ERR_INCONSISTENT_VERSIONS) {
spdlog::error("Dolphin_SISCI_Resource::init_dolphin() - Version mismatch between SISCI user library and SISCI driver");
}
spdlog::error("Dolphin_SISCI_Resource::init_dolphin() - SCIOpen failed - Error code 0x{:x}", error);
return nullptr;
}
return me_ptr;
}
//Tear down this instance's segment and descriptor; the last instance alive
//also shuts the SISCI library down (mirrors create_instance()).
Dolphin_SISCI_Resource::~Dolphin_SISCI_Resource()
{
    sci_error_t error;

    //Serialize against create_instance(): the shared counter decides when
    //SCITerminate() runs.
    std::lock_guard<std::mutex> guard( Dolphin_SISCI_Resource::_data_mutex );

    //No-op unless connect_mc_segment() completed (mc_segment_initialized).
    clean_up_segment();

    // Close the file descriptor
    SCIClose(_v_dev, NO_FLAGS, &error);

    --Dolphin_SISCI_Resource::_usage_count;

    //If this is the last usage, clean up library
    if ( Dolphin_SISCI_Resource::_usage_count == 0)
    {
        spdlog::info("! Calling SCITerminate() !");
        SCITerminate();
    }
}
//Create, prepare, publish and connect the broadcast (multicast) segment.
//BUG FIX: on any failure after SCICreateSegment succeeded, the original
//leaked the local segment — the destructor only cleans up segments that
//reached mc_segment_initialized == true. Each later failure path now rolls
//the partially set up segment back before returning.
DOLPHIN_ERROR_CODES Dolphin_SISCI_Resource::connect_mc_segment(unsigned segment_id, unsigned segment_size)
{
    sci_error_t error;

    if ( !supports_multicast() )
    {
        spdlog::error("Dolphin_SISCI_Resource::connect_mc_segment() - Called, but dolphin network does not support multicast.");
        return DOLPHIN_ERROR_NO_MCAST_SUPPORT;
    }

    //Roll back the locally created segment on a mid-setup failure.
    auto remove_local_segment = [this](bool was_made_available) {
        sci_error_t cleanup_error;
        if (was_made_available)
            SCISetSegmentUnavailable(_local_segment, _local_adapter_num, NO_FLAGS, &cleanup_error);
        SCIRemoveSegment(_local_segment, NO_FLAGS, &cleanup_error);
    };

    // Create local reflective memory segment
    SCICreateSegment(_v_dev, &_local_segment, segment_id, segment_size, NO_CALLBACK, NULL, SCI_FLAG_BROADCAST, &error);
    if (error == SCI_ERR_OK) {
        spdlog::info("Dolphin_SISCI_Resource::connect_mc_segment() - Local segment (id=0x{:x}, size={}) is created.", segment_id, segment_size);
    } else {
        spdlog::error("Dolphin_SISCI_Resource::connect_mc_segment() - SCICreateSegment failed - Error code 0x{:x}", error);
        return DOLPHIN_ERROR_SEGMENT_SETUP_ERROR;
    }

    // Prepare the segment
    SCIPrepareSegment(_local_segment, _local_adapter_num, SCI_FLAG_BROADCAST, &error);
    if (error == SCI_ERR_OK) {
        spdlog::info("Dolphin_SISCI_Resource::connect_mc_segment() - Local segment (id=0x{:x}, size={}) is prepared.", segment_id, segment_size);
    } else {
        spdlog::error("Dolphin_SISCI_Resource::connect_mc_segment() - SCIPrepareSegment failed: {} 0x{:x}.", SCIGetErrorString(error), error);
        remove_local_segment(false);
        return DOLPHIN_ERROR_SEGMENT_SETUP_ERROR;
    }

    // Set the segment available
    SCISetSegmentAvailable(_local_segment, _local_adapter_num, NO_FLAGS, &error);
    if (error != SCI_ERR_OK) {
        spdlog::error("Dolphin_SISCI_Resource::connect_mc_segment() - SCISetSegmentAvailable failed: {} ({})",SCIGetErrorString(error), error);
        remove_local_segment(false);
        return DOLPHIN_ERROR_SEGMENT_SETUP_ERROR;
    }

    //Store the kernel side read address
    _read_addr = get_kernel_read_addr();

    //Not mapping the segment into userspace (for now)
    SCIConnectSegment(_v_dev, &_remote_segment, DIS_BROADCAST_NODEID_GROUP_ALL, segment_id, _local_adapter_num,
                      NO_CALLBACK, NULL, SCI_INFINITE_TIMEOUT, SCI_FLAG_BROADCAST, &error);
    if (error != SCI_ERR_OK)
    {
        spdlog::error("Dolphin_SISCI_Resource::connect_mc_segment() - SCIConnectSegment, failed to connect for write addr: {} 0x{:x}",
                      SCIGetErrorString(error), error);
        remove_local_segment(true);
        return DOLPHIN_ERROR_SEGMENT_SETUP_ERROR;
    }

    //TODO: Figure out how to get write kernel address

    mc_segment_initialized = true;
    _segment_id = segment_id;
    return DOLPHIN_ERROR_OK;
}
//Return the connected segment's id, or -1 if connect_mc_segment() has not
//completed successfully.
int Dolphin_SISCI_Resource::get_segment_id()
{
    //BUG FIX: the original wrote `if(mc_segment_initialized = false)` — an
    //assignment, not a comparison — which both clobbered the flag (so the
    //destructor skipped segment cleanup) and never returned -1.
    if (!mc_segment_initialized)
        return -1;
    return _segment_id;
}
//Kernel-space addresses of the multicast segment. _read_addr is filled by
//connect_mc_segment(); _write_addr is currently never assigned (see the TODO
//in connect_mc_segment) and so is always nullptr.
volatile void * Dolphin_SISCI_Resource::kernel_read_addr() { return _read_addr; }
volatile void * Dolphin_SISCI_Resource::kernel_write_addr() { return _write_addr; }
//Private member functions
bool Dolphin_SISCI_Resource::supports_multicast()
{
sci_query_adapter_t queryAdapter = {0};
sci_error_t error;
unsigned int mcast_max_groups = 0;
queryAdapter.localAdapterNo = _local_adapter_num;
queryAdapter.subcommand = SCI_Q_ADAPTER_MCAST_MAX_GROUPS;
queryAdapter.data = &mcast_max_groups;
SCIQuery(SCI_Q_ADAPTER, &queryAdapter, NO_FLAGS, &error);
// Multicast support is present if the number of multicast groups is not zero
if (mcast_max_groups != 0) return true;
else return false;
}
//Query SISCI for the kernel virtual address of the local segment.
//Returns nullptr if the query fails.
//NOTE(review): the address is read from qlseg.data.ioaddr — presumably the
//union member carrying the result for this subcommand; confirm against the
//SISCI query documentation.
volatile void * Dolphin_SISCI_Resource::get_kernel_read_addr()
{
    sci_error_t error;
    sci_query_local_segment_t qlseg;

    qlseg.subcommand = SCI_Q_LOCAL_SEGMENT_VIRTUAL_KERNEL_ADDR;
    qlseg.segment=_local_segment;

    SCIQuery(SCI_Q_LOCAL_SEGMENT, &qlseg, NO_FLAGS, &error);
    if (error != SCI_ERR_OK) return nullptr; //caller treats nullptr as "unavailable"

    return (volatile void *)qlseg.data.ioaddr;
}
//Undo connect_mc_segment(): disconnect the remote view, withdraw and remove
//the local segment. Intentionally a no-op when the segment never finished
//initializing (mc_segment_initialized is only set at the end of
//connect_mc_segment()). Errors from the SISCI calls are ignored here.
void Dolphin_SISCI_Resource::clean_up_segment()
{
    sci_error_t error;
    if (mc_segment_initialized)
    {
        SCIDisconnectSegment(_remote_segment, NO_FLAGS, &error);
        SCISetSegmentUnavailable(_local_segment, _local_adapter_num, NO_FLAGS, &error);
        SCIRemoveSegment(_local_segment, NO_FLAGS, &error);
    }
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment