Initial commit of package localserver
This commit is contained in:
commit
5e95736e76
104
config.go
Normal file
104
config.go
Normal file
@ -0,0 +1,104 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type LocalServerConfig struct {
|
||||
configFilePath string
|
||||
NodeId string `json:"nodeId"`
|
||||
PrivateKeyPath string `json:"privateKeyPath"`
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
func NewLocalServerConfig() (localServerConfig *LocalServerConfig, err error) {
|
||||
logFile, err := os.OpenFile(filepath.Join("local_server.log"), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0755)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
logger.SetOutput(logFile)
|
||||
logger.SetFlags(log.Ldate | log.LUTC | log.Lshortfile | log.Ltime | log.LstdFlags | log.Lmsgprefix)
|
||||
localServerConfig = &LocalServerConfig{
|
||||
configFilePath: filepath.Join("config","node_config.json"),
|
||||
}
|
||||
err = localServerConfig.startup()
|
||||
return
|
||||
}
|
||||
|
||||
func (l *LocalServerConfig) startup() (err error) {
|
||||
if _, err = os.Stat(l.configFilePath); os.IsNotExist(err) {
|
||||
file, fileErr := os.Create(l.configFilePath)
|
||||
if fileErr != nil {
|
||||
return fileErr
|
||||
}
|
||||
id := uuid.NewString()
|
||||
e := NewEncryptionManager()
|
||||
keypair, keyErr := e.GenerateKeyPair(context.Background(), id, "")
|
||||
if keyErr != nil {
|
||||
return keyErr
|
||||
}
|
||||
baseConfig := map[string]interface{}{
|
||||
"nodeId": id,
|
||||
"privKeyPath": filepath.Join("config", id),
|
||||
"token": "",
|
||||
}
|
||||
bs, marshallErr := json.Marshal(&baseConfig)
|
||||
if marshallErr != nil {
|
||||
return marshallErr
|
||||
}
|
||||
_, writeErr := file.Write(bs)
|
||||
if writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
key_bytes := x509.MarshalPKCS1PublicKey(&keypair.PublicKey)
|
||||
key := pem.EncodeToMemory(&pem.Block{
|
||||
Type: "RSA PUBLIC KEY",
|
||||
Bytes: key_bytes,
|
||||
})
|
||||
logger.Println(string(key))
|
||||
l.NodeId = id
|
||||
l.PrivateKeyPath = ""
|
||||
l.Token = ""
|
||||
bss, marshallErr := json.Marshal(map[string]interface{}{
|
||||
"type": "create_node",
|
||||
"from": id,
|
||||
"to": "serv",
|
||||
"token": "",
|
||||
"payload": map[string]string{
|
||||
"nodeId": id,
|
||||
"nodeKey": string(key),
|
||||
"nodeUsername": "anonymous",
|
||||
},
|
||||
})
|
||||
if marshallErr != nil {
|
||||
return marshallErr
|
||||
}
|
||||
res, postErr := http.Post("https://dev.zippytal.com/req", "application/json", bytes.NewBuffer(bss))
|
||||
if postErr != nil {
|
||||
logger.Println("e")
|
||||
return postErr
|
||||
}
|
||||
logger.Println(res)
|
||||
if err = file.Close(); err != nil {
|
||||
return
|
||||
}
|
||||
} else if err != nil {
|
||||
return
|
||||
}
|
||||
file, err := os.Open(l.configFilePath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = json.NewDecoder(file).Decode(&l)
|
||||
return
|
||||
}
|
||||
112
encryption_manager.go
Normal file
112
encryption_manager.go
Normal file
@ -0,0 +1,112 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
const PEM_PRIVATE_KEY = "RSA PRIVATE KEY"
|
||||
const PEM_PUBLIC_KEY = "RSA PUBLIC KEY"
|
||||
const OPENSSH_PRIVATE_KEY = "OPENSSH PRIVATE KEY"
|
||||
|
||||
type EncryptionManager struct {
|
||||
PrivKey *rsa.PrivateKey
|
||||
PeerKeys map[string]*rsa.PublicKey
|
||||
}
|
||||
|
||||
func NewEncryptionManager() (eManager *EncryptionManager) {
|
||||
eManager = new(EncryptionManager)
|
||||
return
|
||||
}
|
||||
|
||||
func (em *EncryptionManager) GenerateKeyPair(ctx context.Context, name string, pwd string) (privKey *rsa.PrivateKey, err error) {
|
||||
privKey, err = rsa.GenerateKey(rand.Reader, 1<<12)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
block := &pem.Block{
|
||||
Type: PEM_PRIVATE_KEY,
|
||||
Bytes: x509.MarshalPKCS1PrivateKey(privKey),
|
||||
}
|
||||
if pwd != "" {
|
||||
block, err = x509.EncryptPEMBlock(rand.Reader, block.Type, block.Bytes, []byte(pwd), x509.PEMCipherAES256)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
f, err := os.Create(filepath.Join("config", name))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = f.Write(pem.EncodeToMemory(block))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
pubBlock := &pem.Block{
|
||||
Type: PEM_PUBLIC_KEY,
|
||||
Bytes: x509.MarshalPKCS1PublicKey(&privKey.PublicKey),
|
||||
}
|
||||
if pwd != "" {
|
||||
pubBlock, err = x509.EncryptPEMBlock(rand.Reader, pubBlock.Type, pubBlock.Bytes, []byte(pwd), x509.PEMCipherAES256)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
pubf, err := os.Create(filepath.Join("config", fmt.Sprintf("%s.pub", name)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = pubf.Write(pem.EncodeToMemory(pubBlock))
|
||||
return
|
||||
}
|
||||
|
||||
func (em *EncryptionManager) LoadPrivKey(privKeyPath string, password string) (err error) {
|
||||
f, err := os.Open(privKeyPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var buff []byte
|
||||
buff, err = io.ReadAll(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
privPem, _ := pem.Decode(buff)
|
||||
var privePemBytes []byte
|
||||
if privPem.Type != PEM_PRIVATE_KEY && privPem.Type != OPENSSH_PRIVATE_KEY {
|
||||
logger.Println(privPem.Type)
|
||||
err = fmt.Errorf("RSA Private key is of wrong type")
|
||||
return
|
||||
}
|
||||
if password != "" {
|
||||
privePemBytes, err = x509.DecryptPEMBlock(privPem, []byte(password))
|
||||
} else {
|
||||
privePemBytes = privPem.Bytes
|
||||
}
|
||||
var parsedKey interface{}
|
||||
if parsedKey, err = x509.ParsePKCS1PrivateKey(privePemBytes); err != nil {
|
||||
logger.Printf("error one %v\n", err)
|
||||
if parsedKey, err = x509.ParsePKCS8PrivateKey(privePemBytes); err != nil {
|
||||
logger.Printf("error on parsing %v\n", err)
|
||||
if parsedKey, err = x509.ParseECPrivateKey(privePemBytes); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
var privKey *rsa.PrivateKey
|
||||
var ok bool
|
||||
privKey, ok = parsedKey.(*rsa.PrivateKey)
|
||||
if !ok {
|
||||
err = fmt.Errorf("Unable to parse this private key")
|
||||
return
|
||||
}
|
||||
em.PrivKey = privKey
|
||||
return
|
||||
}
|
||||
49
go.mod
Normal file
49
go.mod
Normal file
@ -0,0 +1,49 @@
|
||||
module github.com/loisBN/zippytal_node/localserver
|
||||
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/dgraph-io/badger/v3 v3.2103.2
|
||||
github.com/golang/protobuf v1.5.2
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/pion/rtcp v1.2.9
|
||||
github.com/pion/webrtc/v3 v3.1.11
|
||||
google.golang.org/grpc v1.42.0
|
||||
google.golang.org/protobuf v1.27.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/cespare/xxhash v1.1.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||
github.com/dgraph-io/ristretto v0.1.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect
|
||||
github.com/golang/snappy v0.0.3 // indirect
|
||||
github.com/google/flatbuffers v1.12.1 // indirect
|
||||
github.com/klauspost/compress v1.12.3 // indirect
|
||||
github.com/pion/datachannel v1.5.2 // indirect
|
||||
github.com/pion/dtls/v2 v2.0.10 // indirect
|
||||
github.com/pion/ice/v2 v2.1.14 // indirect
|
||||
github.com/pion/interceptor v0.1.2 // indirect
|
||||
github.com/pion/logging v0.2.2 // indirect
|
||||
github.com/pion/mdns v0.0.5 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/rtp v1.7.4 // indirect
|
||||
github.com/pion/sctp v1.8.0 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.4 // indirect
|
||||
github.com/pion/srtp/v2 v2.0.5 // indirect
|
||||
github.com/pion/stun v0.3.5 // indirect
|
||||
github.com/pion/transport v0.12.3 // indirect
|
||||
github.com/pion/turn/v2 v2.0.5 // indirect
|
||||
github.com/pion/udp v0.1.1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
go.opencensus.io v0.22.5 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
|
||||
golang.org/x/net v0.0.0-20211020060615-d418f374d309 // indirect
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 // indirect
|
||||
golang.org/x/text v0.3.6 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect
|
||||
)
|
||||
1958
grpc_manager.pb.go
Normal file
1958
grpc_manager.pb.go
Normal file
File diff suppressed because it is too large
Load Diff
422
grpc_manager_grpc.pb.go
Normal file
422
grpc_manager_grpc.pb.go
Normal file
@ -0,0 +1,422 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
|
||||
package localserver
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
// GrpcManagerClient is the client API for GrpcManager service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type GrpcManagerClient interface {
|
||||
Link(ctx context.Context, opts ...grpc.CallOption) (GrpcManager_LinkClient, error)
|
||||
RegisterPeer(ctx context.Context, in *PeerRegisterRequest, opts ...grpc.CallOption) (*PeerRegisterResponse, error)
|
||||
ListPeers(ctx context.Context, in *PeerListRequest, opts ...grpc.CallOption) (*PeerListResponse, error)
|
||||
CreateSquad(ctx context.Context, in *SquadCreateRequest, opts ...grpc.CallOption) (*SquadCreateResponse, error)
|
||||
UpdateSquad(ctx context.Context, in *SquadUpdateRequest, opts ...grpc.CallOption) (*SquadUpdateResponse, error)
|
||||
DeleteSquad(ctx context.Context, in *SquadDeleteRequest, opts ...grpc.CallOption) (*SquadDeleteResponse, error)
|
||||
ListSquad(ctx context.Context, in *SquadListRequest, opts ...grpc.CallOption) (*SquadListResponse, error)
|
||||
ConnectSquad(ctx context.Context, in *SquadConnectRequest, opts ...grpc.CallOption) (*SquadConnectResponse, error)
|
||||
LeaveSquad(ctx context.Context, in *SquadLeaveRequest, opts ...grpc.CallOption) (*SquadLeaveResponse, error)
|
||||
}
|
||||
|
||||
type grpcManagerClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewGrpcManagerClient(cc grpc.ClientConnInterface) GrpcManagerClient {
|
||||
return &grpcManagerClient{cc}
|
||||
}
|
||||
|
||||
func (c *grpcManagerClient) Link(ctx context.Context, opts ...grpc.CallOption) (GrpcManager_LinkClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &GrpcManager_ServiceDesc.Streams[0], "/manager.GrpcManager/Link", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &grpcManagerLinkClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type GrpcManager_LinkClient interface {
|
||||
Send(*Request) error
|
||||
Recv() (*Response, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type grpcManagerLinkClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *grpcManagerLinkClient) Send(m *Request) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *grpcManagerLinkClient) Recv() (*Response, error) {
|
||||
m := new(Response)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *grpcManagerClient) RegisterPeer(ctx context.Context, in *PeerRegisterRequest, opts ...grpc.CallOption) (*PeerRegisterResponse, error) {
|
||||
out := new(PeerRegisterResponse)
|
||||
err := c.cc.Invoke(ctx, "/manager.GrpcManager/RegisterPeer", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *grpcManagerClient) ListPeers(ctx context.Context, in *PeerListRequest, opts ...grpc.CallOption) (*PeerListResponse, error) {
|
||||
out := new(PeerListResponse)
|
||||
err := c.cc.Invoke(ctx, "/manager.GrpcManager/ListPeers", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *grpcManagerClient) CreateSquad(ctx context.Context, in *SquadCreateRequest, opts ...grpc.CallOption) (*SquadCreateResponse, error) {
|
||||
out := new(SquadCreateResponse)
|
||||
err := c.cc.Invoke(ctx, "/manager.GrpcManager/CreateSquad", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *grpcManagerClient) UpdateSquad(ctx context.Context, in *SquadUpdateRequest, opts ...grpc.CallOption) (*SquadUpdateResponse, error) {
|
||||
out := new(SquadUpdateResponse)
|
||||
err := c.cc.Invoke(ctx, "/manager.GrpcManager/UpdateSquad", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *grpcManagerClient) DeleteSquad(ctx context.Context, in *SquadDeleteRequest, opts ...grpc.CallOption) (*SquadDeleteResponse, error) {
|
||||
out := new(SquadDeleteResponse)
|
||||
err := c.cc.Invoke(ctx, "/manager.GrpcManager/DeleteSquad", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *grpcManagerClient) ListSquad(ctx context.Context, in *SquadListRequest, opts ...grpc.CallOption) (*SquadListResponse, error) {
|
||||
out := new(SquadListResponse)
|
||||
err := c.cc.Invoke(ctx, "/manager.GrpcManager/ListSquad", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *grpcManagerClient) ConnectSquad(ctx context.Context, in *SquadConnectRequest, opts ...grpc.CallOption) (*SquadConnectResponse, error) {
|
||||
out := new(SquadConnectResponse)
|
||||
err := c.cc.Invoke(ctx, "/manager.GrpcManager/ConnectSquad", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *grpcManagerClient) LeaveSquad(ctx context.Context, in *SquadLeaveRequest, opts ...grpc.CallOption) (*SquadLeaveResponse, error) {
|
||||
out := new(SquadLeaveResponse)
|
||||
err := c.cc.Invoke(ctx, "/manager.GrpcManager/LeaveSquad", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// GrpcManagerServer is the server API for GrpcManager service.
|
||||
// All implementations must embed UnimplementedGrpcManagerServer
|
||||
// for forward compatibility
|
||||
type GrpcManagerServer interface {
|
||||
Link(GrpcManager_LinkServer) error
|
||||
RegisterPeer(context.Context, *PeerRegisterRequest) (*PeerRegisterResponse, error)
|
||||
ListPeers(context.Context, *PeerListRequest) (*PeerListResponse, error)
|
||||
CreateSquad(context.Context, *SquadCreateRequest) (*SquadCreateResponse, error)
|
||||
UpdateSquad(context.Context, *SquadUpdateRequest) (*SquadUpdateResponse, error)
|
||||
DeleteSquad(context.Context, *SquadDeleteRequest) (*SquadDeleteResponse, error)
|
||||
ListSquad(context.Context, *SquadListRequest) (*SquadListResponse, error)
|
||||
ConnectSquad(context.Context, *SquadConnectRequest) (*SquadConnectResponse, error)
|
||||
LeaveSquad(context.Context, *SquadLeaveRequest) (*SquadLeaveResponse, error)
|
||||
mustEmbedUnimplementedGrpcManagerServer()
|
||||
}
|
||||
|
||||
// UnimplementedGrpcManagerServer must be embedded to have forward compatible implementations.
|
||||
type UnimplementedGrpcManagerServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedGrpcManagerServer) Link(GrpcManager_LinkServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method Link not implemented")
|
||||
}
|
||||
func (UnimplementedGrpcManagerServer) RegisterPeer(context.Context, *PeerRegisterRequest) (*PeerRegisterResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RegisterPeer not implemented")
|
||||
}
|
||||
func (UnimplementedGrpcManagerServer) ListPeers(context.Context, *PeerListRequest) (*PeerListResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListPeers not implemented")
|
||||
}
|
||||
func (UnimplementedGrpcManagerServer) CreateSquad(context.Context, *SquadCreateRequest) (*SquadCreateResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateSquad not implemented")
|
||||
}
|
||||
func (UnimplementedGrpcManagerServer) UpdateSquad(context.Context, *SquadUpdateRequest) (*SquadUpdateResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UpdateSquad not implemented")
|
||||
}
|
||||
func (UnimplementedGrpcManagerServer) DeleteSquad(context.Context, *SquadDeleteRequest) (*SquadDeleteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteSquad not implemented")
|
||||
}
|
||||
func (UnimplementedGrpcManagerServer) ListSquad(context.Context, *SquadListRequest) (*SquadListResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListSquad not implemented")
|
||||
}
|
||||
func (UnimplementedGrpcManagerServer) ConnectSquad(context.Context, *SquadConnectRequest) (*SquadConnectResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ConnectSquad not implemented")
|
||||
}
|
||||
func (UnimplementedGrpcManagerServer) LeaveSquad(context.Context, *SquadLeaveRequest) (*SquadLeaveResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method LeaveSquad not implemented")
|
||||
}
|
||||
func (UnimplementedGrpcManagerServer) mustEmbedUnimplementedGrpcManagerServer() {}
|
||||
|
||||
// UnsafeGrpcManagerServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to GrpcManagerServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeGrpcManagerServer interface {
|
||||
mustEmbedUnimplementedGrpcManagerServer()
|
||||
}
|
||||
|
||||
func RegisterGrpcManagerServer(s grpc.ServiceRegistrar, srv GrpcManagerServer) {
|
||||
s.RegisterService(&GrpcManager_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _GrpcManager_Link_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(GrpcManagerServer).Link(&grpcManagerLinkServer{stream})
|
||||
}
|
||||
|
||||
type GrpcManager_LinkServer interface {
|
||||
Send(*Response) error
|
||||
Recv() (*Request, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type grpcManagerLinkServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *grpcManagerLinkServer) Send(m *Response) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *grpcManagerLinkServer) Recv() (*Request, error) {
|
||||
m := new(Request)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _GrpcManager_RegisterPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PeerRegisterRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GrpcManagerServer).RegisterPeer(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/manager.GrpcManager/RegisterPeer",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GrpcManagerServer).RegisterPeer(ctx, req.(*PeerRegisterRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _GrpcManager_ListPeers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PeerListRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GrpcManagerServer).ListPeers(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/manager.GrpcManager/ListPeers",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GrpcManagerServer).ListPeers(ctx, req.(*PeerListRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _GrpcManager_CreateSquad_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SquadCreateRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GrpcManagerServer).CreateSquad(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/manager.GrpcManager/CreateSquad",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GrpcManagerServer).CreateSquad(ctx, req.(*SquadCreateRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _GrpcManager_UpdateSquad_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SquadUpdateRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GrpcManagerServer).UpdateSquad(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/manager.GrpcManager/UpdateSquad",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GrpcManagerServer).UpdateSquad(ctx, req.(*SquadUpdateRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _GrpcManager_DeleteSquad_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SquadDeleteRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GrpcManagerServer).DeleteSquad(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/manager.GrpcManager/DeleteSquad",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GrpcManagerServer).DeleteSquad(ctx, req.(*SquadDeleteRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _GrpcManager_ListSquad_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SquadListRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GrpcManagerServer).ListSquad(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/manager.GrpcManager/ListSquad",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GrpcManagerServer).ListSquad(ctx, req.(*SquadListRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _GrpcManager_ConnectSquad_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SquadConnectRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GrpcManagerServer).ConnectSquad(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/manager.GrpcManager/ConnectSquad",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GrpcManagerServer).ConnectSquad(ctx, req.(*SquadConnectRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _GrpcManager_LeaveSquad_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SquadLeaveRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(GrpcManagerServer).LeaveSquad(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/manager.GrpcManager/LeaveSquad",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(GrpcManagerServer).LeaveSquad(ctx, req.(*SquadLeaveRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// GrpcManager_ServiceDesc is the grpc.ServiceDesc for GrpcManager service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var GrpcManager_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "manager.GrpcManager",
|
||||
HandlerType: (*GrpcManagerServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "RegisterPeer",
|
||||
Handler: _GrpcManager_RegisterPeer_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ListPeers",
|
||||
Handler: _GrpcManager_ListPeers_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CreateSquad",
|
||||
Handler: _GrpcManager_CreateSquad_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "UpdateSquad",
|
||||
Handler: _GrpcManager_UpdateSquad_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteSquad",
|
||||
Handler: _GrpcManager_DeleteSquad_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ListSquad",
|
||||
Handler: _GrpcManager_ListSquad_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ConnectSquad",
|
||||
Handler: _GrpcManager_ConnectSquad_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "LeaveSquad",
|
||||
Handler: _GrpcManager_LeaveSquad_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "Link",
|
||||
Handler: _GrpcManager_Link_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "grpc_manager.proto",
|
||||
}
|
||||
32
logMiddleware.go
Normal file
32
logMiddleware.go
Normal file
@ -0,0 +1,32 @@
|
||||
package localserver
|
||||
|
||||
// type (
|
||||
// LogMiddleware struct {
|
||||
// menuItem *systray.MenuItem
|
||||
// }
|
||||
// )
|
||||
|
||||
// func NewLogMiddleware(menuItem *systray.MenuItem) *LogMiddleware {
|
||||
// return &LogMiddleware{
|
||||
// menuItem: menuItem,
|
||||
// }
|
||||
// }
|
||||
|
||||
// func (lm *LogMiddleware) Process(ctx context.Context, req *http.Request, w http.ResponseWriter) (err error) {
|
||||
// done, errCh := make(chan struct{}), make(chan error)
|
||||
// go func() {
|
||||
// logger.Println("received a request")
|
||||
// lm.menuItem.AddSubMenuItemCheckbox(filepath.Join("remote addr : %s", req.RemoteAddr), "remote", false)
|
||||
// logger.Printf("from %s\n", req.RemoteAddr)
|
||||
// done <- struct{}{}
|
||||
// }()
|
||||
// select {
|
||||
// case <-ctx.Done():
|
||||
// err = ctx.Err()
|
||||
// return
|
||||
// case <-done:
|
||||
// return nil
|
||||
// case err = <-errCh:
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
355
p2p_fs_datachannel_manager.go
Normal file
355
p2p_fs_datachannel_manager.go
Normal file
@ -0,0 +1,355 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
HOME = "home"
|
||||
)
|
||||
|
||||
const (
|
||||
FS_GET_FOLDER = "fs_get_folder"
|
||||
FS_GET_FOLDER_RESPONSE = "fs_get_folder_response"
|
||||
FS_UPLOAD_FILE = "fs_upload_file"
|
||||
FS_UPLOAD_FILE_INIT = "fs_upload_file_init"
|
||||
FS_UPLOAD_FILE_END = "fs_upload_file_response_end"
|
||||
FS_DOWNLOAD_FILE = "fs_download_file"
|
||||
FS_DOWNLOAD_FILE_RESPONSE = "fs_download_file_response"
|
||||
FS_DOWNLOAD_FILE_RESPONSE_INIT = "fs_download_file_response_init"
|
||||
FS_DOWNLOAD_FILE_RESPONSE_END = "fs_download_file_response_end"
|
||||
FS_CREATE_FOLDER = "fs_create_folder"
|
||||
)
|
||||
|
||||
type DataChannelManager interface {
|
||||
HandleMessage(message *DatachannelMessage, channel *webrtc.DataChannel) (done chan struct{}, errChan chan error)
|
||||
}
|
||||
|
||||
type P2PFSDatachannelManager struct {
|
||||
l *sync.Mutex
|
||||
uploadFile map[string]*os.File
|
||||
}
|
||||
|
||||
type DatachannelMessage struct {
|
||||
From string `json:"from"`
|
||||
Type string `json:"type"`
|
||||
Payload *DatachannelMessagePayload `json:"payload"`
|
||||
}
|
||||
|
||||
type DatachannelMessagePayload struct {
|
||||
Path string
|
||||
Content []byte
|
||||
}
|
||||
|
||||
type FSResultInfo struct {
|
||||
Name string
|
||||
Type string
|
||||
Path string
|
||||
Size int64
|
||||
ModTime string
|
||||
}
|
||||
|
||||
func NewP2PFSDatachannelManager() (manager *P2PFSDatachannelManager) {
|
||||
manager = &P2PFSDatachannelManager{&sync.Mutex{}, make(map[string]*os.File)}
|
||||
return
|
||||
}
|
||||
|
||||
func (pdm *P2PFSDatachannelManager) HandleMessage(message *DatachannelMessage, channel *webrtc.DataChannel) (done chan struct{}, errChan chan error) {
|
||||
done, errChan = make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
switch message.Type {
|
||||
case FS_GET_FOLDER:
|
||||
logger.Println("try to get folder")
|
||||
|
||||
done, err := pdm.handleGetFolder(message.Payload.Path, channel)
|
||||
select {
|
||||
case <-done:
|
||||
logger.Println("operation succeed")
|
||||
case e := <-err:
|
||||
errChan <- e
|
||||
return
|
||||
}
|
||||
case FS_DOWNLOAD_FILE:
|
||||
logger.Println("tried to download a file")
|
||||
done, err := pdm.sendFile(message.Payload.Path, channel)
|
||||
select {
|
||||
case <-done:
|
||||
logger.Println("operation succeed")
|
||||
case e := <-err:
|
||||
errChan <- e
|
||||
return
|
||||
}
|
||||
case FS_UPLOAD_FILE_INIT:
|
||||
logger.Println("tried to upload a file")
|
||||
if err := pdm.downloadFileInit(message.Payload.Path); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
case FS_UPLOAD_FILE:
|
||||
if err := pdm.downloadFile(message.Payload.Path, message.Payload.Content); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
case FS_UPLOAD_FILE_END:
|
||||
if err := pdm.downloadFileEnd(message.Payload.Path); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
case FS_CREATE_FOLDER:
|
||||
if err := pdm.createFolder(message.Payload.Path); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
default:
|
||||
logger.Printf("got a new message from %s with payload %v\n", message.From, message.Payload)
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
return done, errChan
|
||||
}
|
||||
|
||||
func (pdm *P2PFSDatachannelManager) handleGetFolder(path string, channel *webrtc.DataChannel) (done chan struct{}, errCh chan error) {
|
||||
done, errCh = make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
var dirPath string
|
||||
if path == HOME {
|
||||
homeDir, err := os.UserHomeDir()
|
||||
logger.Println(homeDir)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
dirPath = homeDir
|
||||
} else {
|
||||
dirPath = path
|
||||
}
|
||||
dir, err := os.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
index := 1
|
||||
for i, v := range dirPath {
|
||||
if i > 0 {
|
||||
if v == '/' {
|
||||
index = i
|
||||
}
|
||||
}
|
||||
}
|
||||
dirs := []*FSResultInfo{{Name: "..", Type: "directory", Path: dirPath[:index], Size: 0, ModTime: time.Now().Format("January 2, 2006")}}
|
||||
for _, d := range dir {
|
||||
f := &FSResultInfo{}
|
||||
info, err := d.Info()
|
||||
if err != nil {
|
||||
logger.Println(err)
|
||||
continue
|
||||
}
|
||||
f.ModTime = fmt.Sprintf("%s at %s", info.ModTime().Format("January 2, 2006"), info.ModTime().Format("15:04:05"))
|
||||
f.Path = fmt.Sprintf("%s/%s", dirPath, info.Name())
|
||||
f.Size = info.Size()
|
||||
f.Name = info.Name()
|
||||
if info.IsDir() {
|
||||
f.Type = "directory"
|
||||
} else {
|
||||
f.Type = "file"
|
||||
}
|
||||
dirs = append(dirs, f)
|
||||
}
|
||||
|
||||
if len(dirs) < 100 {
|
||||
logger.Println("dir smaller than 100")
|
||||
bs, err := json.Marshal(map[string]interface{}{
|
||||
"type": FS_GET_FOLDER_RESPONSE,
|
||||
"from": "lolo_local_serv",
|
||||
"payload": map[string]interface{}{
|
||||
"folderPath": dirPath,
|
||||
"dirContent": dirs,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := channel.SendText(string(bs)); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
} else {
|
||||
logger.Println("dir greater than 10000")
|
||||
reste := len(dirs) % 100
|
||||
x := (len(dirs) - reste) / 100
|
||||
for j := 0; j < x; j++ {
|
||||
logger.Println("dir sending packet than 100")
|
||||
d := dirs[j*100 : (j+1)*100]
|
||||
logger.Println("length of d :", len(d))
|
||||
bs, err := json.Marshal(map[string]interface{}{
|
||||
"type": FS_GET_FOLDER_RESPONSE,
|
||||
"from": "lolo_local_serv",
|
||||
"payload": map[string]interface{}{
|
||||
"folderPath": dirPath,
|
||||
"dirContent": d,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := channel.SendText(string(bs)); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
bs, err := json.Marshal(map[string]interface{}{
|
||||
"type": FS_GET_FOLDER_RESPONSE,
|
||||
"from": "lolo_local_serv",
|
||||
"payload": map[string]interface{}{
|
||||
"folderPath": dirPath,
|
||||
"dirContent": dirs[len(dirs)-reste:],
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := channel.SendText(string(bs)); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
return
|
||||
}
|
||||
|
||||
func (pdm *P2PFSDatachannelManager) sendFile(path string, channel *webrtc.DataChannel) (<-chan struct{}, <-chan error) {
|
||||
done, errCh := make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
bsInit, err := json.Marshal(map[string]interface{}{
|
||||
"type": FS_DOWNLOAD_FILE_RESPONSE_INIT,
|
||||
"from": "lolo_local_serv",
|
||||
"payload": map[string]string{
|
||||
"path": path,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := channel.SendText(string(bsInit)); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
r := bufio.NewReader(f)
|
||||
buf := make([]byte, 0, 10000)
|
||||
for {
|
||||
n, err := r.Read(buf[:cap(buf)])
|
||||
buf = buf[:n]
|
||||
if n == 0 {
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
log.Fatal(err)
|
||||
}
|
||||
bs, err := json.Marshal(map[string]interface{}{
|
||||
"type": FS_DOWNLOAD_FILE_RESPONSE,
|
||||
"from": "lolo_local_serv",
|
||||
"payload": map[string]interface{}{
|
||||
"path": path,
|
||||
"content": buf,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := channel.SendText(string(bs)); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
bs, err := json.Marshal(map[string]interface{}{
|
||||
"type": FS_DOWNLOAD_FILE_RESPONSE_END,
|
||||
"from": "lolo_local_serv",
|
||||
"payload": map[string]string{
|
||||
"path": path,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := channel.SendText(string(bs)); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
return done, errCh
|
||||
}
|
||||
|
||||
func (pdm *P2PFSDatachannelManager) downloadFileInit(path string) (err error) {
|
||||
logger.Println("upload path name is", path)
|
||||
if _, ok := pdm.uploadFile[path]; ok {
|
||||
err = fmt.Errorf("the file %s is already being uploaded", path)
|
||||
return
|
||||
}
|
||||
file, e := os.Create(path)
|
||||
if e != nil {
|
||||
err = fmt.Errorf("an error occured in download file init : %v", e)
|
||||
return
|
||||
}
|
||||
index := 1
|
||||
for i, v := range path {
|
||||
if i > 0 {
|
||||
if v == '/' {
|
||||
index = i
|
||||
}
|
||||
}
|
||||
}
|
||||
logger.Println(path)
|
||||
logger.Println(path[index+1:])
|
||||
pdm.uploadFile[path[index+1:]] = file
|
||||
return
|
||||
}
|
||||
|
||||
func (pdm *P2PFSDatachannelManager) downloadFile(path string, content []byte) (err error) {
|
||||
if _, ok := pdm.uploadFile[path]; !ok {
|
||||
err = fmt.Errorf("no upload file open for path %s", path)
|
||||
return
|
||||
}
|
||||
_, err = pdm.uploadFile[path].Write(content)
|
||||
return
|
||||
}
|
||||
|
||||
func (pdm *P2PFSDatachannelManager) downloadFileEnd(path string) (err error) {
|
||||
logger.Println("closing file")
|
||||
if _, ok := pdm.uploadFile[path]; !ok {
|
||||
err = fmt.Errorf("no upload file open for path %s", path)
|
||||
return
|
||||
}
|
||||
err = pdm.uploadFile[path].Close()
|
||||
delete(pdm.uploadFile, path)
|
||||
return
|
||||
}
|
||||
|
||||
func (pdm *P2PFSDatachannelManager) createFolder(path string) (err error) {
|
||||
err = os.Mkdir(path, 0770)
|
||||
return
|
||||
}
|
||||
149
proto/grpc_manager.proto
Normal file
149
proto/grpc_manager.proto
Normal file
@ -0,0 +1,149 @@
|
||||
syntax = "proto3";
|
||||
package manager;
|
||||
option go_package=".;localserver";
|
||||
|
||||
message Request {
|
||||
string type = 1;
|
||||
string from = 2;
|
||||
string token = 3;
|
||||
map<string,string> payload = 4;
|
||||
}
|
||||
|
||||
message PeerRegisterRequest {
|
||||
string peerId = 1;
|
||||
string peerKey = 2;
|
||||
string peerUsername = 3;
|
||||
}
|
||||
|
||||
message PeerRegisterResponse {
|
||||
bool success = 1;
|
||||
string log = 2;
|
||||
}
|
||||
|
||||
message PeerListRequest {
|
||||
int32 number = 1;
|
||||
int32 lastIndex = 2;
|
||||
string name = 3;
|
||||
map<string,string> filters = 4;
|
||||
}
|
||||
|
||||
message SquadConnectRequest {
|
||||
string id = 1;
|
||||
string userId = 2;
|
||||
string password = 3;
|
||||
string authType = 4;
|
||||
string networkType = 5;
|
||||
}
|
||||
|
||||
message ProtoSquad {
|
||||
string id = 1;
|
||||
string name = 2;
|
||||
repeated string members = 3;
|
||||
string squadType = 4;
|
||||
string owner = 5;
|
||||
string host = 6;
|
||||
string authType = 7;
|
||||
bool status = 8;
|
||||
}
|
||||
|
||||
message SquadCreateRequest {
|
||||
string userId = 1;
|
||||
string name = 2;
|
||||
string squadType = 3;
|
||||
string password = 4;
|
||||
}
|
||||
|
||||
message SquadListRequest {
|
||||
int32 number = 1;
|
||||
int32 lastIndex = 2;
|
||||
string name = 3;
|
||||
map<string,string> filters = 4;
|
||||
string squadType = 5;
|
||||
string squadNetworkType = 6;
|
||||
}
|
||||
|
||||
message SquadUpdateRequest {
|
||||
string userId = 1;
|
||||
string id = 5;
|
||||
string name = 2;
|
||||
string squadType = 3;
|
||||
string password = 4;
|
||||
}
|
||||
|
||||
message SquadDeleteRequest {
|
||||
string userId = 1;
|
||||
string squadId = 2;
|
||||
}
|
||||
|
||||
message Peer {
|
||||
string id = 1;
|
||||
string name = 2;
|
||||
string pubKey = 3;
|
||||
bool active = 4;
|
||||
}
|
||||
|
||||
message PeerListResponse {
|
||||
bool success = 1;
|
||||
int32 lastIndex = 2;
|
||||
repeated Peer peers = 3;
|
||||
}
|
||||
|
||||
message SquadConnectResponse {
|
||||
bool success = 1;
|
||||
string reason = 2;
|
||||
string id = 3;
|
||||
repeated string members = 4;
|
||||
}
|
||||
|
||||
message SquadLeaveRequest {
|
||||
string userId = 1;
|
||||
string squadId = 2;
|
||||
}
|
||||
|
||||
message SquadCreateResponse {
|
||||
bool success = 1;
|
||||
string reason = 2;
|
||||
ProtoSquad squad = 3;
|
||||
}
|
||||
|
||||
message SquadListResponse {
|
||||
bool success = 1;
|
||||
int32 lastIndex = 2;
|
||||
repeated ProtoSquad squads = 3;
|
||||
}
|
||||
|
||||
message SquadUpdateResponse {
|
||||
bool success = 1;
|
||||
string reason = 2;
|
||||
ProtoSquad squad = 3;
|
||||
}
|
||||
|
||||
message SquadDeleteResponse {
|
||||
bool succes = 1;
|
||||
string reason = 2;
|
||||
ProtoSquad squad = 3;
|
||||
}
|
||||
|
||||
message SquadLeaveResponse {
|
||||
bool success = 1;
|
||||
string reason = 2;
|
||||
string squadId = 3;
|
||||
}
|
||||
|
||||
message Response {
|
||||
string type = 1;
|
||||
bool success = 2;
|
||||
map<string,string> payload = 3;
|
||||
}
|
||||
|
||||
service GrpcManager {
|
||||
rpc Link(stream Request) returns (stream Response);
|
||||
rpc RegisterPeer (PeerRegisterRequest) returns (PeerRegisterResponse);
|
||||
rpc ListPeers(PeerListRequest) returns (PeerListResponse);
|
||||
rpc CreateSquad (SquadCreateRequest) returns (SquadCreateResponse);
|
||||
rpc UpdateSquad (SquadUpdateRequest) returns (SquadUpdateResponse);
|
||||
rpc DeleteSquad (SquadDeleteRequest) returns (SquadDeleteResponse);
|
||||
rpc ListSquad (SquadListRequest) returns (SquadListResponse);
|
||||
rpc ConnectSquad (SquadConnectRequest) returns (SquadConnectResponse);
|
||||
rpc LeaveSquad (SquadLeaveRequest) returns (SquadLeaveResponse);
|
||||
}
|
||||
222
server.go
Normal file
222
server.go
Normal file
@ -0,0 +1,222 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
var logger *log.Logger = new(log.Logger)
|
||||
var dbLogger *BadgerLogger = &BadgerLogger{
|
||||
Logger: logger,
|
||||
}
|
||||
|
||||
type BadgerLogger struct {
|
||||
*log.Logger
|
||||
}
|
||||
|
||||
func (b *BadgerLogger) Debugf(message string, data ...interface{}) {
|
||||
b.Println(data...)
|
||||
}
|
||||
|
||||
func (b *BadgerLogger) Errorf(message string, data ...interface{}) {
|
||||
b.Println(data...)
|
||||
}
|
||||
|
||||
func (b *BadgerLogger) Infof(message string, data ...interface{}) {
|
||||
b.Println(data...)
|
||||
}
|
||||
|
||||
func (b *BadgerLogger) Warningf(message string, data ...interface{}) {
|
||||
b.Println(data...)
|
||||
}
|
||||
|
||||
type (
|
||||
LocalServerHandlerMiddleware interface {
|
||||
Process(ctx context.Context, req *http.Request, w http.ResponseWriter) (err error)
|
||||
}
|
||||
|
||||
GrpcClientManagerMiddleware interface {
|
||||
Process(ctx context.Context, req *Response, stream GrpcManager_LinkClient) (err error)
|
||||
}
|
||||
|
||||
LocalServerHandler struct {
|
||||
middlewares []LocalServerHandlerMiddleware
|
||||
}
|
||||
|
||||
LocalServer struct {
|
||||
ID string
|
||||
GrpcClientManager *GrpcClientManager
|
||||
}
|
||||
|
||||
GrpcClientManager struct {
|
||||
GrpcConn grpc.ClientConnInterface
|
||||
GrpcManagerClient GrpcManagerClient
|
||||
GrpcLinkClient GrpcManager_LinkClient
|
||||
middlewares []GrpcClientManagerMiddleware
|
||||
}
|
||||
|
||||
CustomMenuItem struct {
|
||||
//MenuItem *systray.MenuItem
|
||||
ID string
|
||||
State bool
|
||||
SubMenus []*CustomMenuItem
|
||||
CallBack func(bool)
|
||||
}
|
||||
|
||||
ReqType string
|
||||
|
||||
LocalServerRequest struct {
|
||||
ReqType ReqType
|
||||
Payload map[string]interface{}
|
||||
}
|
||||
)
|
||||
|
||||
func NewLocalServer(addr string, grpcAddr string, id string, token string) (localServer *LocalServer, err error) {
|
||||
webRTCCallManager, err := NewWebRTCCallManager(id, token, NewWebrtcCallSoundManager(), NewWebrtcCallChatManager(), NewWebrtcCallVideoManager(), NewWebrtcCallFileManager())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
zoneManager, err := NewZoneManager(id, token)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
webrtcFsManager, err := NewWebrtcFsManager(NewP2PFSDatachannelManager())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
webrtcGrpcMiddleware := NewWebRTCGrpcMiddleware(webRTCCallManager)
|
||||
ZoneGrpcMiddleware := NewZoneGrpcMiddleware(zoneManager)
|
||||
webrtcFsMiddleware := NewWebRTCFsMiddleware(webrtcFsManager)
|
||||
grpcClientManager, err := NewGrpcClientManager(grpcAddr, id, webrtcGrpcMiddleware, ZoneGrpcMiddleware)
|
||||
webrtcGrpcMiddleware.stream = grpcClientManager.GrpcLinkClient
|
||||
webRTCCallManager.stream = grpcClientManager.GrpcLinkClient
|
||||
zoneManager.stream = grpcClientManager.GrpcLinkClient
|
||||
webrtcFsMiddleware.stream = grpcClientManager.GrpcLinkClient
|
||||
webrtcFsManager.stream = grpcClientManager.GrpcLinkClient
|
||||
ZoneGrpcMiddleware.stream = grpcClientManager.GrpcLinkClient
|
||||
localServer = &LocalServer{
|
||||
ID: id,
|
||||
GrpcClientManager: grpcClientManager,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func NewGrpcClientManager(addr string, id string, middleware ...GrpcClientManagerMiddleware) (grpcClientManager *GrpcClientManager, err error) {
|
||||
conn, grpcClient, grpcLinkClient, err := NewGrpcConn(addr, id)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
grpcClientManager = &GrpcClientManager{
|
||||
GrpcConn: conn,
|
||||
GrpcManagerClient: grpcClient,
|
||||
GrpcLinkClient: grpcLinkClient,
|
||||
middlewares: middleware,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func NewGrpcConn(addr string, id string) (conn grpc.ClientConnInterface, grpcClient GrpcManagerClient, grpcLinkClient GrpcManager_LinkClient, err error) {
|
||||
var cert = filepath.Join("config", "cert.pem")
|
||||
creds, err := credentials.NewClientTLSFromFile(cert, "dev.zippytal.com")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var opts []grpc.DialOption = []grpc.DialOption{grpc.WithTransportCredentials(creds)}
|
||||
conn, err = grpc.Dial(addr, opts...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
grpcClient = NewGrpcManagerClient(conn)
|
||||
grpcLinkClient, err = grpcClient.Link(context.Background())
|
||||
if err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if err = grpcLinkClient.Send(&Request{
|
||||
Type: "init",
|
||||
From: id,
|
||||
Payload: map[string]string{},
|
||||
Token: "none",
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func NewLocalServerHandler(middlewares ...LocalServerHandlerMiddleware) (localServerHandler *LocalServerHandler) {
|
||||
localServerHandler = &LocalServerHandler{
|
||||
middlewares: middlewares,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (lsh *LocalServerHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
wg, done, errCh := &sync.WaitGroup{}, make(chan struct{}), make(chan error)
|
||||
for _, middleware := range lsh.middlewares {
|
||||
wg.Add(1)
|
||||
go func(m LocalServerHandlerMiddleware) {
|
||||
err := m.Process(req.Context(), req, w)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
}
|
||||
wg.Done()
|
||||
}(middleware)
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
done <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-req.Context().Done():
|
||||
case <-done:
|
||||
case err := <-errCh:
|
||||
logger.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (gcm *GrpcClientManager) Handle(ctx context.Context) (err error) {
|
||||
done, errCh := make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
wg := new(sync.WaitGroup)
|
||||
for {
|
||||
res, err := gcm.GrpcLinkClient.Recv()
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
for _, middleware := range gcm.middlewares {
|
||||
wg.Add(1)
|
||||
go func(m GrpcClientManagerMiddleware) {
|
||||
if err := m.Process(ctx, res, gcm.GrpcLinkClient); err != nil {
|
||||
logger.Println(err)
|
||||
}
|
||||
wg.Done()
|
||||
}(middleware)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-gcm.GrpcLinkClient.Context().Done():
|
||||
logger.Println("grpc context")
|
||||
err = gcm.GrpcLinkClient.Context().Err()
|
||||
return
|
||||
case <-ctx.Done():
|
||||
logger.Println("app context")
|
||||
err = ctx.Err()
|
||||
return
|
||||
case <-done:
|
||||
return
|
||||
case err = <-errCh:
|
||||
if closeErr := gcm.GrpcLinkClient.CloseSend(); closeErr != nil {
|
||||
return closeErr
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
19
signalingHttpClient.go
Normal file
19
signalingHttpClient.go
Normal file
@ -0,0 +1,19 @@
|
||||
package localserver
|
||||
|
||||
type SignalingHttpClient struct{}
|
||||
|
||||
func (s *SignalingHttpClient) LoadHostedSquads() {
|
||||
|
||||
}
|
||||
|
||||
func (s *SignalingHttpClient) CreateHostedSquad() {
|
||||
|
||||
}
|
||||
|
||||
func (s *SignalingHttpClient) DeleteHostedSquad() {
|
||||
|
||||
}
|
||||
|
||||
func (s *SignalingHttpClient) UpdateHostedSquad() {
|
||||
|
||||
}
|
||||
136
webrtcCallChatManager.go
Normal file
136
webrtcCallChatManager.go
Normal file
@ -0,0 +1,136 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
const (
|
||||
CHAT_MESSAGE_BROADCAST = "chat_message_broadcast"
|
||||
CHAT_MESSAGE_PRIVATE = "chat_message_private"
|
||||
)
|
||||
|
||||
type WebrtcCallChatManager struct{}
|
||||
|
||||
func NewWebrtcCallChatManager() *WebrtcCallChatManager {
|
||||
return new(WebrtcCallChatManager)
|
||||
}
|
||||
|
||||
func (w *WebrtcCallChatManager) HandleCallEvent(from string, squadId string, eventId string, payload map[string]interface{}, data []byte, manager *WebRTCCallManager) (err error) {
|
||||
done, errCh := make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
logger.Println("got an event in call chat manager", from, eventId, payload)
|
||||
switch eventId {
|
||||
case CHAT_MESSAGE_BROADCAST:
|
||||
if e := validateEvent(payload, "message"); e != nil {
|
||||
errCh <- e
|
||||
return
|
||||
}
|
||||
if e := w.sendBrodcastChatMessage(from, payload["message"].(string), squadId, manager); e != nil {
|
||||
errCh <- e
|
||||
return
|
||||
}
|
||||
case CHAT_MESSAGE_PRIVATE:
|
||||
if e := validateEvent(payload, "message", "dst"); e != nil {
|
||||
errCh <- e
|
||||
return
|
||||
}
|
||||
if e := w.sendPrivateChatMessage(from, payload["message"].(string), squadId, manager, payload["message"].([]string)...); e != nil {
|
||||
errCh <- e
|
||||
return
|
||||
}
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
return nil
|
||||
case err = <-errCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (w WebrtcCallChatManager) sendBrodcastChatMessage(from string, message string, squadId string, manager *WebRTCCallManager) (err error) {
|
||||
if _, ok := manager.Squads[squadId]; !ok {
|
||||
err = fmt.Errorf("no correponding squad found")
|
||||
return
|
||||
}
|
||||
for _, member := range manager.Squads[squadId].Members {
|
||||
if _, ok := manager.DataChannels[member]; ok && member != from {
|
||||
bs, marshalErr := json.Marshal(map[string]interface{}{
|
||||
"type": "send_chat_message",
|
||||
"from": from,
|
||||
"payload": map[string]string{
|
||||
"message": message,
|
||||
},
|
||||
})
|
||||
if marshalErr != nil {
|
||||
logger.Println(err)
|
||||
continue
|
||||
}
|
||||
lock:
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(manager.DataChannels[member].l, 0, 1) {
|
||||
defer atomic.SwapInt32(manager.DataChannels[member].l, 0)
|
||||
if sendErr := manager.DataChannels[member].DataChannel.SendText(string(bs)); sendErr != nil {
|
||||
logger.Println(sendErr)
|
||||
}
|
||||
break lock
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w WebrtcCallChatManager) sendPrivateChatMessage(from string, message string, squadId string, manager *WebRTCCallManager, dst ...string) (err error) {
|
||||
if _, ok := manager.Squads[squadId]; !ok {
|
||||
err = fmt.Errorf("no correponding squad found")
|
||||
return
|
||||
}
|
||||
for _, member := range manager.Squads[squadId].Members {
|
||||
for _, id := range dst {
|
||||
if id == member {
|
||||
if _, ok := manager.DataChannels[member]; ok && member != from {
|
||||
bs, marshalErr := json.Marshal(map[string]interface{}{
|
||||
"type": "",
|
||||
"from": from,
|
||||
"payload": map[string]string{
|
||||
"message": message,
|
||||
},
|
||||
})
|
||||
if marshalErr != nil {
|
||||
logger.Println(err)
|
||||
continue
|
||||
}
|
||||
lock:
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(manager.DataChannels[member].l, 0, 1) {
|
||||
defer atomic.SwapInt32(manager.DataChannels[member].l, 0)
|
||||
if sendErr := manager.DataChannels[member].DataChannel.SendText(string(bs)); sendErr != nil {
|
||||
logger.Println(sendErr)
|
||||
}
|
||||
break lock
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func validateEvent(event map[string]interface{}, fields ...string) (err error) {
|
||||
for _, field := range fields {
|
||||
if _, ok := event[field]; !ok {
|
||||
err = fmt.Errorf("no field %s in req payload", field)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
5
webrtcCallEventManager.go
Normal file
5
webrtcCallEventManager.go
Normal file
@ -0,0 +1,5 @@
|
||||
package localserver
|
||||
|
||||
type WebrtcCallEventManager interface {
|
||||
HandleCallEvent(from string, squadId string, eventId string, payload map[string]interface{}, data []byte, manager *WebRTCCallManager) (err error)
|
||||
}
|
||||
311
webrtcCallFileManager.go
Normal file
311
webrtcCallFileManager.go
Normal file
@ -0,0 +1,311 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
UPLOAD_INIT = "upload_init"
|
||||
UPLOAD = "upload"
|
||||
UPLOAD_DONE = "upload_done"
|
||||
DOWNLOAD_INIT = "download_init"
|
||||
DOWNLOAD = "download"
|
||||
DOWNLOAD_DONE = "download_done"
|
||||
HOSTED_SQUAD_DOWNLOAD_FILE_RESPONSE_INIT = "hosted_squad_download_file_response_init"
|
||||
HOSTED_SQUAD_DOWNLOAD_FILE_RESPONSE = "hosted_squad_download_file_response"
|
||||
HOSTED_SQUAD_DOWNLOAD_FILE_RESPONSE_END = "hosted_squad_download_file_response_end"
|
||||
)
|
||||
|
||||
const (
|
||||
bufferedAmountLowThreshold uint64 = 512 * 1024
|
||||
//maxBufferedAmount uint64 = 1024 * 1024
|
||||
)
|
||||
|
||||
type WebrtcCallFileManager struct {
|
||||
files map[string]*os.File
|
||||
l *int32
|
||||
}
|
||||
|
||||
func NewWebrtcCallFileManager() *WebrtcCallFileManager {
|
||||
l := int32(0)
|
||||
return &WebrtcCallFileManager{
|
||||
files: make(map[string]*os.File),
|
||||
l: &l,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WebrtcCallFileManager) HandleCallEvent(from string, squadId string, eventId string, payload map[string]interface{}, data []byte, manager *WebRTCCallManager) (err error) {
|
||||
done, errCh := make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
logger.Println("got an event in call file manager", from, eventId, payload)
|
||||
switch eventId {
|
||||
case UPLOAD_INIT:
|
||||
if _, ok := payload["filename"]; !ok {
|
||||
errCh <- fmt.Errorf("no field filename in payload")
|
||||
return
|
||||
}
|
||||
if _, ok := payload["filename"].(string); !ok {
|
||||
errCh <- fmt.Errorf("field filename in payload is not a string")
|
||||
return
|
||||
}
|
||||
if err = w.initUpload(squadId, from, payload["filename"].(string)); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case UPLOAD:
|
||||
if _, ok := payload["filename"]; !ok {
|
||||
errCh <- fmt.Errorf("no field filename in payload")
|
||||
return
|
||||
}
|
||||
if _, ok := payload["filename"].(string); !ok {
|
||||
errCh <- fmt.Errorf("field filename in payload is not a string")
|
||||
return
|
||||
}
|
||||
if err = w.upload(squadId, from, payload["filename"].(string), data); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case UPLOAD_DONE:
|
||||
if _, ok := payload["filename"]; !ok {
|
||||
errCh <- fmt.Errorf("no field filename in payload")
|
||||
return
|
||||
}
|
||||
if _, ok := payload["filename"].(string); !ok {
|
||||
errCh <- fmt.Errorf("field filename in payload is not a string")
|
||||
return
|
||||
}
|
||||
if _, ok := payload["targets"]; !ok {
|
||||
errCh <- fmt.Errorf("no field targets in payload")
|
||||
return
|
||||
}
|
||||
if _, ok := payload["targets"].([]interface{}); !ok {
|
||||
errCh <- fmt.Errorf("field targets in payload is not a string")
|
||||
return
|
||||
}
|
||||
channels := []*DataChannel{}
|
||||
manager.DataChannelMapMux.RLock()
|
||||
for _, target := range payload["targets"].([]interface{}) {
|
||||
if _, ok := manager.DataChannels[target.(string)]; !ok {
|
||||
manager.DataChannelMapMux.RUnlock()
|
||||
errCh <- fmt.Errorf("no corresponding datachannel : %s", target.(string))
|
||||
return
|
||||
}
|
||||
channel := manager.DataChannels[target.(string)]
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(channel.l, 0, 1) {
|
||||
defer atomic.SwapInt32(channel.l, 0)
|
||||
break
|
||||
}
|
||||
}
|
||||
channels = append(channels, channel)
|
||||
}
|
||||
manager.DataChannelMapMux.RUnlock()
|
||||
if err = w.uploadDone(squadId, from, payload["filename"].(string), channels); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case DOWNLOAD:
|
||||
if _, ok := payload["filename"]; !ok {
|
||||
errCh <- fmt.Errorf("no field filename in payload")
|
||||
return
|
||||
}
|
||||
if _, ok := payload["filename"].(string); !ok {
|
||||
errCh <- fmt.Errorf("field filename in payload is not a string")
|
||||
return
|
||||
}
|
||||
if _, ok := payload["peerId"]; !ok {
|
||||
errCh <- fmt.Errorf("no field peerId in payload")
|
||||
return
|
||||
}
|
||||
if _, ok := payload["peerId"].(string); !ok {
|
||||
errCh <- fmt.Errorf("field peerId in payload is not a string")
|
||||
return
|
||||
}
|
||||
manager.DataChannelMapMux.RLock()
|
||||
if _, ok := manager.DataChannels[payload["peerId"].(string)]; !ok {
|
||||
manager.DataChannelMapMux.RUnlock()
|
||||
errCh <- fmt.Errorf("no corresponding datachannel")
|
||||
return
|
||||
}
|
||||
channel := manager.DataChannels[payload["peerId"].(string)]
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(channel.l, 0, 1) {
|
||||
logger.Println("atomic lock unlocked")
|
||||
defer atomic.SwapInt32(channel.l, 0)
|
||||
break
|
||||
}
|
||||
}
|
||||
manager.DataChannelMapMux.RUnlock()
|
||||
if err = w.download(squadId, from, payload["filename"].(string), channel.DataChannel); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
return nil
|
||||
case err = <-errCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WebrtcCallFileManager) initUpload(squadId string, from string, fileName string) (err error) {
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(w.l, 0, 1) {
|
||||
defer atomic.SwapInt32(w.l, 0)
|
||||
if _, dirErr := os.Stat(filepath.Join("data", "squads", squadId)); os.IsNotExist(dirErr) {
|
||||
if err = os.MkdirAll(filepath.Join("data", "squads", squadId), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
f, fErr := os.Create(filepath.Join("data", "squads", squadId, fileName))
|
||||
if fErr != nil {
|
||||
return fErr
|
||||
}
|
||||
f.Close()
|
||||
f, fErr = os.OpenFile(filepath.Join("data", "squads", squadId, fileName), os.O_APPEND|os.O_WRONLY, 0644)
|
||||
if fErr != nil {
|
||||
return fErr
|
||||
}
|
||||
w.files[fileName] = f
|
||||
break
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return
|
||||
}
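
// initUpload, upload, and uploadDone all serialize access to w.files with the same
// busy-wait on CompareAndSwapInt32. A small helper capturing that pattern (a sketch
// only; withFileLock is not part of this commit) could look like:
func (w *WebrtcCallFileManager) withFileLock(fn func() error) error {
	// Spin until the flag flips from 0 to 1, mirroring the loops in the methods above and below.
	for !atomic.CompareAndSwapInt32(w.l, 0, 1) {
	}
	defer atomic.SwapInt32(w.l, 0)
	return fn()
}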
|
||||
|
||||
func (w *WebrtcCallFileManager) upload(squadId string, from string, fileName string, data []byte) (err error) {
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(w.l, 0, 1) {
|
||||
defer atomic.SwapInt32(w.l, 0)
|
||||
if _, ok := w.files[fileName]; !ok {
|
||||
err = fmt.Errorf("no open file with name %s", fileName)
|
||||
return
|
||||
}
|
||||
_, err = w.files[fileName].Write(data)
|
||||
break
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *WebrtcCallFileManager) uploadDone(squadId string, from string, fileName string, channels []*DataChannel) (err error) {
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(w.l, 0, 1) {
|
||||
defer atomic.SwapInt32(w.l, 0)
|
||||
if _, ok := w.files[fileName]; !ok {
|
||||
err = fmt.Errorf("no open file with name %s", fileName)
|
||||
return
|
||||
}
|
||||
err = w.files[fileName].Close()
|
||||
delete(w.files, fileName)
|
||||
bsInit, jsonErr := json.Marshal(map[string]interface{}{
|
||||
"type": UPLOAD_DONE,
|
||||
"from": "server",
|
||||
"payload": map[string]string{
|
||||
"path": fileName,
|
||||
},
|
||||
})
|
||||
if jsonErr != nil {
|
||||
return jsonErr
|
||||
}
|
||||
for _, channel := range channels {
|
||||
if err = channel.DataChannel.SendText(string(bsInit)); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
break
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *WebrtcCallFileManager) download(squadId string, dst string, fileName string, channel *webrtc.DataChannel) (err error) {
|
||||
logger.Println("got called")
|
||||
if _, dirErr := os.Stat(filepath.Join("data", "squads", squadId, fileName)); os.IsNotExist(dirErr) {
|
||||
logger.Println("file does not exist :", filepath.Join("data", "squads", squadId, fileName))
|
||||
return
|
||||
}
|
||||
f, err := os.Open(filepath.Join("data", "squads", squadId, fileName))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
bsInit, err := json.Marshal(map[string]interface{}{
|
||||
"type": HOSTED_SQUAD_DOWNLOAD_FILE_RESPONSE_INIT,
|
||||
"from": "server",
|
||||
"payload": map[string]string{
|
||||
"path": fileName,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = channel.SendText(string(bsInit)); err != nil {
|
||||
return
|
||||
}
|
||||
r := bufio.NewReader(f)
|
||||
buf := make([]byte, 0, 30000)
|
||||
logger.Println("start reading")
|
||||
for {
|
||||
n, readErr := r.Read(buf[:cap(buf)])
|
||||
buf = buf[:n]
|
||||
if n == 0 {
if readErr == io.EOF {
break
}
if readErr != nil {
logger.Println(readErr)
return readErr
}
logger.Println("read returned 0 bytes without error")
break
|
||||
}
|
||||
bs, jsonErr := json.Marshal(map[string]interface{}{
|
||||
"type": HOSTED_SQUAD_DOWNLOAD_FILE_RESPONSE,
|
||||
"from": "server",
|
||||
"payload": map[string]interface{}{
|
||||
"path": fileName,
|
||||
"content": buf,
|
||||
},
|
||||
})
|
||||
if jsonErr != nil {
|
||||
return jsonErr
|
||||
}
|
||||
if err = channel.SendText(string(bs)); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
logger.Println("stop reading")
|
||||
bs, err := json.Marshal(map[string]interface{}{
|
||||
"type": HOSTED_SQUAD_DOWNLOAD_FILE_RESPONSE_END,
|
||||
"from": "server",
|
||||
"payload": map[string]string{
|
||||
"path": fileName,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = channel.SendText(string(bs)); err != nil {
|
||||
return
|
||||
}
|
||||
logger.Println("done")
|
||||
return
|
||||
}
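
// On the wire, download produces one _INIT envelope, a stream of _RESPONSE envelopes
// whose []byte content is base64-encoded by encoding/json, and a closing _END envelope.
// A sketch of how a receiving peer might append the chunks to a file (DownloadChunk and
// appendChunk are illustrative names, not part of this commit):
type DownloadChunk struct {
	Type    string `json:"type"`
	From    string `json:"from"`
	Payload struct {
		Path    string `json:"path"`
		Content []byte `json:"content"` // json.Unmarshal decodes the base64 string back into bytes
	} `json:"payload"`
}

func appendChunk(f *os.File, raw []byte) error {
	var chunk DownloadChunk
	if err := json.Unmarshal(raw, &chunk); err != nil {
		return err
	}
	if chunk.Type != HOSTED_SQUAD_DOWNLOAD_FILE_RESPONSE {
		return nil // the init and end envelopes carry no content
	}
	_, err := f.Write(chunk.Payload.Content)
	return err
}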
|
||||
845
webrtcCallManager.go
Normal file
@ -0,0 +1,845 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
sync "sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/pion/rtcp"
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
NAME = "name"
|
||||
ID = "ID"
|
||||
SDP = "sdp"
|
||||
CANDIDATE = "webrtc_candidate"
|
||||
SQUAD_ID = "squadId"
|
||||
FROM = "from"
|
||||
TO = "to"
|
||||
STOP_CALL = "stop_call"
|
||||
LIST_HOSTED_SQUADS_BY_HOST = "list_hosted_squads_by_host"
|
||||
)
|
||||
|
||||
type Squad struct {
|
||||
ID string
|
||||
Members []string
|
||||
}
|
||||
|
||||
type WebRTCCallManager struct {
|
||||
stream GrpcManager_LinkClient
|
||||
middlewares []WebrtcCallEventManager
|
||||
ID string
|
||||
LocalSD map[string]*webrtc.SessionDescription
|
||||
RTCPeerConnections map[string]*RTCPeerConnection
|
||||
AudioTransceiver map[string][]*PeerSender
|
||||
VideoTransceiver map[string][]*PeerSender
|
||||
DataChannels map[string]*DataChannel
|
||||
PendingCandidates map[string][]*webrtc.ICECandidate
|
||||
RemoteTracks map[string][]*RemoteTrack
|
||||
Squads map[string]*Squad
|
||||
SquadMapMux *sync.RWMutex
|
||||
CandidateChannel chan *IncomingCandidate
|
||||
CandidateMux *sync.RWMutex
|
||||
RemoteTracksMux *sync.RWMutex
|
||||
RTCPeerConnectionMapMux *sync.RWMutex
|
||||
DataChannelMapMux *sync.RWMutex
|
||||
LocalSDMapMux *sync.RWMutex
|
||||
AudioSenderMux *sync.RWMutex
|
||||
VideoSenderMux *sync.RWMutex
|
||||
}
|
||||
|
||||
type IncomingCandidate struct {
|
||||
For string
|
||||
Candidate *webrtc.ICECandidateInit
|
||||
}
|
||||
|
||||
type RTCPeerConnection struct {
|
||||
*webrtc.PeerConnection
|
||||
makingOffer bool
|
||||
negotiate func(string, string)
|
||||
makingOfferLock *sync.Mutex
|
||||
}
|
||||
|
||||
type DataChannel struct {
|
||||
DataChannel *webrtc.DataChannel
|
||||
bufferedAmountLowThresholdReached <-chan struct{}
|
||||
l *int32
|
||||
}
|
||||
|
||||
type PeerSender struct {
|
||||
ID string
|
||||
Transceiver *webrtc.RTPTransceiver
|
||||
Sender *webrtc.RTPSender
|
||||
}
|
||||
|
||||
type CallEvent struct {
|
||||
EventId string `json:"eventId"`
|
||||
From string `json:"from"`
|
||||
Data []byte `json:"data"`
|
||||
Payload map[string]interface{} `json:"payload"`
|
||||
}
|
||||
|
||||
type RemoteTrack struct {
|
||||
ID string
|
||||
Track *webrtc.TrackLocalStaticRTP
|
||||
rdv *int32
|
||||
}
|
||||
|
||||
type OnICECandidateFunc func(string, *webrtc.ICECandidate) error
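
// The callback receives the target peer id and a freshly gathered candidate once a
// remote description is in place. A caller-supplied implementation might look like
// the sketch below (the actual signalling send is up to the caller and is only
// hinted at here):
var exampleOnCandidate OnICECandidateFunc = func(target string, c *webrtc.ICECandidate) error {
	// Forward the candidate to the remote peer over the caller's signalling path.
	logger.Println("forwarding candidate to", target, c.ToJSON().Candidate)
	return nil
}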
|
||||
|
||||
func NewWebRTCCallManager(id string, token string, eventHandlers ...WebrtcCallEventManager) (webRTCCallManager *WebRTCCallManager, err error) {
|
||||
squadList, err := loadHostedSquads(token, id)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
squads := make(map[string]*Squad)
|
||||
for _, squad := range squadList {
|
||||
squads[squad.ID] = squad
|
||||
}
|
||||
logger.Println(squads)
|
||||
webRTCCallManager = &WebRTCCallManager{
|
||||
middlewares: eventHandlers,
|
||||
ID: id,
|
||||
AudioTransceiver: make(map[string][]*PeerSender),
|
||||
VideoTransceiver: make(map[string][]*PeerSender),
|
||||
DataChannels: make(map[string]*DataChannel),
|
||||
PendingCandidates: make(map[string][]*webrtc.ICECandidate),
|
||||
LocalSD: make(map[string]*webrtc.SessionDescription),
|
||||
RTCPeerConnections: make(map[string]*RTCPeerConnection),
|
||||
RemoteTracks: make(map[string][]*RemoteTrack),
|
||||
Squads: squads,
|
||||
SquadMapMux: &sync.RWMutex{},
|
||||
RTCPeerConnectionMapMux: &sync.RWMutex{},
|
||||
LocalSDMapMux: &sync.RWMutex{},
|
||||
CandidateMux: &sync.RWMutex{},
|
||||
DataChannelMapMux: &sync.RWMutex{},
|
||||
AudioSenderMux: &sync.RWMutex{},
|
||||
VideoSenderMux: &sync.RWMutex{},
|
||||
RemoteTracksMux: &sync.RWMutex{},
|
||||
}
|
||||
return
|
||||
}
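
// A sketch of how the pieces in this commit could be wired together, assuming a node
// id and token taken from LocalServerConfig (exampleStartCall itself is illustrative
// and not part of the commit):
func exampleStartCall(cfg *LocalServerConfig) {
	// The three managers below all satisfy WebrtcCallEventManager.
	callManager, err := NewWebRTCCallManager(cfg.NodeId, cfg.Token,
		NewWebrtcCallFileManager(),
		NewWebrtcCallSoundManager(),
		NewWebrtcCallVideoManager(),
	)
	if err != nil {
		logger.Println(err)
		return
	}
	_ = callManager // the signalling stream (wm.stream) is attached elsewhere
}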
|
||||
|
||||
func loadHostedSquads(token string, hostId string) (squads []*Squad, err error) {
|
||||
body, err := json.Marshal(map[string]interface{}{
|
||||
"type": LIST_HOSTED_SQUADS_BY_HOST,
|
||||
"token": token,
|
||||
"from": hostId,
|
||||
"payload": map[string]string{
|
||||
"host": hostId,
|
||||
"lastIndex": "0",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
res, err := http.Post("https://app.zippytal.com/req", "application/json", bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
logger.Println("error come from there in webrtc call manager")
|
||||
return
|
||||
}
|
||||
bs, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = json.Unmarshal(bs, &squads)
|
||||
return
|
||||
}
|
||||
|
||||
func (wm *WebRTCCallManager) CreateOffer(ctx context.Context, target string, from string, cb OnICECandidateFunc) (err error) {
|
||||
peerConnection, err := wm.createPeerConnection(target, from, "aa5da62f-2800-4dd5-bf86-423cda048120", webrtc.SDPTypeOffer, cb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
logger.Println("connection created")
|
||||
rawOffer, err := peerConnection.CreateOffer(nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = peerConnection.SetLocalDescription(rawOffer); err != nil {
|
||||
return
|
||||
}
|
||||
wm.RTCPeerConnectionMapMux.Lock()
|
||||
logger.Println("adding for target", target)
|
||||
wm.RTCPeerConnections[target] = &RTCPeerConnection{
|
||||
PeerConnection: peerConnection,
|
||||
makingOffer: true,
|
||||
makingOfferLock: &sync.Mutex{},
|
||||
negotiate: wm.negotiate,
|
||||
}
|
||||
wm.RTCPeerConnectionMapMux.Unlock()
|
||||
err = wm.stream.Send(&Request{
|
||||
Type: string(HOSTED_SQUAD_WEBRTC_OFFER),
|
||||
From: wm.ID,
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"to": target,
|
||||
"from": wm.ID,
|
||||
"sdp": rawOffer.SDP,
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (wm *WebRTCCallManager) HandleOffer(ctx context.Context, req map[string]string, cb OnICECandidateFunc) (err error) {
|
||||
done, errCh := make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
if _, ok := wm.Squads[req[SQUAD_ID]]; !ok {
|
||||
err = fmt.Errorf("no corresponding squad")
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
peerConnection, err := wm.createPeerConnection(req[FROM], req[TO], req[SQUAD_ID], webrtc.SDPTypeAnswer, cb)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
wm.RTCPeerConnectionMapMux.Lock()
|
||||
wm.RTCPeerConnections[req[FROM]] = &RTCPeerConnection{
|
||||
PeerConnection: peerConnection,
|
||||
makingOffer: false,
|
||||
makingOfferLock: &sync.Mutex{},
|
||||
negotiate: wm.negotiate,
|
||||
}
|
||||
wm.RTCPeerConnectionMapMux.Unlock()
|
||||
offer := webrtc.SessionDescription{
|
||||
Type: webrtc.SDPTypeOffer,
|
||||
SDP: req[SDP],
|
||||
}
|
||||
if err = peerConnection.SetRemoteDescription(offer); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
rawAnswer, err := peerConnection.CreateAnswer(nil)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
wm.LocalSDMapMux.Lock()
|
||||
wm.LocalSD[req[FROM]] = &rawAnswer
|
||||
wm.LocalSDMapMux.Unlock()
|
||||
wm.SquadMapMux.Lock()
|
||||
wm.Squads[req[SQUAD_ID]].Members = append(wm.Squads[req[SQUAD_ID]].Members, req[FROM])
|
||||
wm.SquadMapMux.Unlock()
|
||||
if err = wm.stream.Send(&Request{
|
||||
Type: string(HOSTED_SQUAD_WEBRTC_ANSWER),
|
||||
From: wm.ID,
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"to": req[FROM],
|
||||
"from": wm.ID,
|
||||
"sdp": rawAnswer.SDP,
|
||||
},
|
||||
}); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case err = <-errCh:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (wm *WebRTCCallManager) HandleAnswer(ctx context.Context, req map[string]string) (err error) {
|
||||
wm.RTCPeerConnectionMapMux.Lock()
|
||||
defer wm.RTCPeerConnectionMapMux.Unlock()
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logger.Printf("recover from panic in handle answer : %v\n", r)
|
||||
}
|
||||
}()
|
||||
if _, ok := wm.RTCPeerConnections[req[FROM]]; !ok {
|
||||
err = fmt.Errorf("no corresponding peer connection for id : %s", req[FROM])
|
||||
return
|
||||
}
|
||||
peerConnection := wm.RTCPeerConnections[req[FROM]]
logger.Println("---------------------")
logger.Println(req[SDP])
logger.Println("---------------------")
if err = peerConnection.SetRemoteDescription(webrtc.SessionDescription{
|
||||
Type: webrtc.SDPTypeAnswer,
|
||||
SDP: req[SDP],
|
||||
}); err != nil {
|
||||
logger.Println("error occured while setting remote description in handle answer")
|
||||
return
|
||||
}
|
||||
if err = wm.stream.Send(&Request{
|
||||
Type: string(HOSTED_SQUAD_WEBRTC_COUNTER_OFFER),
|
||||
From: wm.ID,
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"from": wm.ID,
|
||||
"to": req[FROM],
|
||||
},
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
wm.CandidateMux.Lock()
|
||||
for _, candidate := range wm.PendingCandidates[req[FROM]] {
|
||||
logger.Println("sending candidate from answer to", req[FROM])
|
||||
if err = wm.stream.Send(&Request{
|
||||
Type: string(HOSTED_SQUAD_WEBRTC_CANDIDATE),
|
||||
From: wm.ID,
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"from": wm.ID,
|
||||
"to": req[FROM],
|
||||
"candidate": candidate.ToJSON().Candidate,
|
||||
"sdpMid": *candidate.ToJSON().SDPMid,
|
||||
"sdpMlineIndex": strconv.Itoa(int(*candidate.ToJSON().SDPMLineIndex)),
|
||||
},
|
||||
}); err != nil {
|
||||
wm.CandidateMux.Unlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
wm.CandidateMux.Unlock()
|
||||
wm.CandidateMux.Lock()
|
||||
delete(wm.PendingCandidates, req[FROM])
|
||||
wm.CandidateMux.Unlock()
|
||||
wm.LocalSDMapMux.Lock()
|
||||
delete(wm.LocalSD, req[FROM])
|
||||
wm.LocalSDMapMux.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (wm *WebRTCCallManager) HandleCounterOffer(ctx context.Context, req map[string]string) (err error) {
|
||||
wm.RTCPeerConnectionMapMux.Lock()
|
||||
if _, ok := wm.RTCPeerConnections[req[FROM]]; !ok {
|
||||
err = fmt.Errorf("no field corresponding peer connection for id %s", req[FROM])
|
||||
wm.RTCPeerConnectionMapMux.Unlock()
|
||||
return
|
||||
}
|
||||
logger.Println("handling counter offer")
|
||||
connection := wm.RTCPeerConnections[req[FROM]]
|
||||
wm.RTCPeerConnectionMapMux.Unlock()
|
||||
wm.LocalSDMapMux.Lock()
|
||||
if err = connection.SetLocalDescription(*wm.LocalSD[req[FROM]]); err != nil {
|
||||
wm.LocalSDMapMux.Unlock()
|
||||
return
|
||||
}
|
||||
wm.LocalSDMapMux.Unlock()
|
||||
wm.CandidateMux.Lock()
|
||||
for _, candidate := range wm.PendingCandidates[req[FROM]] {
|
||||
logger.Println("sending candidate to", req[FROM])
|
||||
if err = wm.stream.Send(&Request{
|
||||
Type: string(HOSTED_SQUAD_WEBRTC_CANDIDATE),
|
||||
From: wm.ID,
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"from": wm.ID,
|
||||
"to": req[FROM],
|
||||
"candidate": candidate.ToJSON().Candidate,
|
||||
"sdpMid": *candidate.ToJSON().SDPMid,
|
||||
"sdpMlineIndex": strconv.Itoa(int(*candidate.ToJSON().SDPMLineIndex)),
|
||||
},
|
||||
}); err != nil {
|
||||
wm.CandidateMux.Unlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
delete(wm.PendingCandidates, req[FROM])
|
||||
wm.CandidateMux.Unlock()
|
||||
wm.LocalSDMapMux.Lock()
|
||||
delete(wm.LocalSD, req[FROM])
|
||||
wm.LocalSDMapMux.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (wm *WebRTCCallManager) createPeerConnection(target string, from string, squadId string, peerType webrtc.SDPType, cb OnICECandidateFunc) (peerConnection *webrtc.PeerConnection, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logger.Printf("recover from panic : %v\n", r)
|
||||
}
|
||||
}()
|
||||
config := webrtc.Configuration{
|
||||
ICEServers: []webrtc.ICEServer{
|
||||
{
|
||||
URLs: []string{"stun:stun.l.google.com:19302", "stun:stunserver.org:3478"},
|
||||
},
|
||||
},
|
||||
SDPSemantics: webrtc.SDPSemanticsUnifiedPlanWithFallback,
|
||||
}
|
||||
|
||||
peerConnection, err = webrtc.NewPeerConnection(config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
logger.Println("---------------------------------------------------")
|
||||
if peerType == webrtc.SDPTypeAnswer {
|
||||
maxRetransmits := uint16(100)
|
||||
channel, err := peerConnection.CreateDataChannel("data", &webrtc.DataChannelInit{
|
||||
MaxRetransmits: &maxRetransmits,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
channel.OnOpen(func() {
|
||||
logger.Println("channel opened")
|
||||
if chanErr := channel.SendText("yooo man this is open"); chanErr != nil {
|
||||
logger.Println(chanErr)
|
||||
}
|
||||
})
|
||||
channel.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
var event CallEvent
|
||||
if err := json.Unmarshal(msg.Data, &event); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
for _, handler := range wm.middlewares {
|
||||
if err := handler.HandleCallEvent(event.From, squadId, event.EventId, event.Payload, event.Data, wm); err != nil {
|
||||
logger.Println(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
})
|
||||
logger.Println("new channel for target : ", target)
|
||||
channel.SetBufferedAmountLowThreshold(bufferedAmountLowThreshold)
|
||||
channel.OnBufferedAmountLow(func() {
|
||||
|
||||
})
|
||||
wm.DataChannelMapMux.Lock()
|
||||
logger.Println(target)
|
||||
l := int32(0)
|
||||
wm.DataChannels[target] = &DataChannel{
|
||||
DataChannel: channel,
|
||||
l: &l,
|
||||
}
|
||||
wm.DataChannelMapMux.Unlock()
|
||||
} else {
|
||||
peerConnection.OnDataChannel(func(dc *webrtc.DataChannel) {
|
||||
wm.DataChannelMapMux.Lock()
|
||||
l := int32(0)
|
||||
wm.DataChannels[target] = &DataChannel{
|
||||
DataChannel: dc,
|
||||
l: &l,
|
||||
}
|
||||
wm.DataChannelMapMux.Unlock()
|
||||
dc.OnOpen(func() {
|
||||
logger.Printf("got a new open datachannel %s\n", dc.Label())
|
||||
})
|
||||
dc.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
var event CallEvent
|
||||
if err := json.Unmarshal(msg.Data, &event); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
for _, handler := range wm.middlewares {
|
||||
if err := handler.HandleCallEvent(event.From, squadId, event.EventId, event.Payload, event.Data, wm); err != nil {
|
||||
logger.Println(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
wm.RemoteTracksMux.RLock()
|
||||
for _, id := range wm.Squads[squadId].Members {
|
||||
if id != target {
|
||||
<-time.NewTimer(time.Millisecond * 50).C
|
||||
if _, ok := wm.RemoteTracks[id]; !ok {
|
||||
continue
|
||||
}
|
||||
for _, track := range wm.RemoteTracks[id] {
|
||||
transceiver, err := peerConnection.AddTransceiverFromKind(track.Track.Kind(), webrtc.RTPTransceiverInit{Direction: webrtc.RTPTransceiverDirectionSendonly})
|
||||
if err != nil {
|
||||
logger.Println("add track error")
|
||||
continue
|
||||
}
|
||||
if err := transceiver.Sender().ReplaceTrack(track.Track); err != nil {
|
||||
logger.Println("add track error")
|
||||
continue
|
||||
}
|
||||
if track.Track.Kind() == webrtc.RTPCodecTypeVideo {
|
||||
wm.VideoSenderMux.Lock()
|
||||
if len(wm.VideoTransceiver) == 0 {
|
||||
wm.VideoTransceiver[id] = []*PeerSender{{ID: target, Transceiver: transceiver}}
|
||||
} else {
|
||||
wm.VideoTransceiver[id] = append(wm.VideoTransceiver[id], &PeerSender{ID: target, Transceiver: transceiver})
|
||||
}
|
||||
wm.VideoSenderMux.Unlock()
|
||||
} else if track.Track.Kind() == webrtc.RTPCodecTypeAudio {
|
||||
wm.AudioSenderMux.Lock()
|
||||
if len(wm.AudioTransceiver) == 0 {
|
||||
wm.AudioTransceiver[id] = []*PeerSender{{ID: target, Transceiver: transceiver}}
|
||||
} else {
|
||||
wm.AudioTransceiver[id] = append(wm.AudioTransceiver[id], &PeerSender{ID: target, Transceiver: transceiver})
|
||||
}
|
||||
wm.AudioSenderMux.Unlock()
|
||||
}
|
||||
logger.Println("track added", track)
|
||||
}
|
||||
}
|
||||
}
|
||||
wm.RemoteTracksMux.RUnlock()
|
||||
peerConnection.OnConnectionStateChange(func(pcs webrtc.PeerConnectionState) {
|
||||
if pcs == webrtc.PeerConnectionStateClosed || pcs == webrtc.PeerConnectionStateDisconnected || pcs == webrtc.PeerConnectionStateFailed {
|
||||
logger.Println(pcs)
|
||||
wm.HandleLeavingMember(target, squadId)
|
||||
}
|
||||
})
|
||||
peerConnection.OnICEConnectionStateChange(func(is webrtc.ICEConnectionState) {
|
||||
logger.Printf("ICE connection state has changed %s\n", is.String())
|
||||
if is == webrtc.ICEConnectionStateDisconnected || is == webrtc.ICEConnectionStateFailed {
|
||||
logger.Println(is)
|
||||
}
|
||||
})
|
||||
peerConnection.OnTrack(func(tr *webrtc.TrackRemote, r *webrtc.RTPReceiver) {
|
||||
logger.Println("got new track")
|
||||
defer func() {
|
||||
if stopErr := r.Stop(); stopErr != nil {
|
||||
logger.Println(stopErr)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
ticker := time.NewTicker(1500 * time.Millisecond)
|
||||
for range ticker.C {
|
||||
if rtcpSendErr := peerConnection.WriteRTCP([]rtcp.Packet{&rtcp.PictureLossIndication{MediaSSRC: uint32(tr.SSRC())}}); rtcpSendErr != nil {
|
||||
logger.Println(rtcpSendErr)
|
||||
break
|
||||
}
|
||||
}
|
||||
}()
|
||||
uniqId := uuid.New()
|
||||
i := fmt.Sprintf("%s/%s", target, uniqId.String())
|
||||
localTrack, newTrackErr := webrtc.NewTrackLocalStaticRTP(tr.Codec().RTPCodecCapability, i, i)
|
||||
if newTrackErr != nil {
|
||||
return
|
||||
}
|
||||
logger.Println(localTrack)
|
||||
rtpbuf := make([]byte, 1400)
|
||||
flag := int32(0)
|
||||
remote := &RemoteTrack{ID: target, Track: localTrack, rdv: &flag}
|
||||
wm.RemoteTracksMux.Lock()
|
||||
if len(wm.RemoteTracks[target]) == 0 {
|
||||
wm.RemoteTracks[target] = []*RemoteTrack{remote}
|
||||
} else {
|
||||
wm.RemoteTracks[target] = append(wm.RemoteTracks[target], remote)
|
||||
}
|
||||
index := len(wm.RemoteTracks[target])
|
||||
logger.Println(index, wm.RemoteTracks)
|
||||
wm.RemoteTracksMux.Unlock()
|
||||
wm.SquadMapMux.RLock()
|
||||
for _, id := range wm.Squads[squadId].Members {
|
||||
if id != target {
|
||||
if _, ok := wm.RTCPeerConnections[id]; !ok {
|
||||
continue
|
||||
}
|
||||
connection := wm.RTCPeerConnections[id]
|
||||
transceiver, transceiverErr := connection.AddTransceiverFromKind(localTrack.Kind(), webrtc.RTPTransceiverInit{Direction: webrtc.RTPTransceiverDirectionSendonly})
if transceiverErr != nil {
logger.Println(transceiverErr)
|
||||
continue
|
||||
}
|
||||
if replaceTrackErr := transceiver.Sender().ReplaceTrack(localTrack); replaceTrackErr != nil {
|
||||
logger.Println(replaceTrackErr)
|
||||
continue
|
||||
}
|
||||
go func() {
|
||||
rtcpBuf := make([]byte, 1500)
|
||||
for {
|
||||
if _, _, rtcpErr := transceiver.Sender().Read(rtcpBuf); rtcpErr != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
if localTrack.Kind() == webrtc.RTPCodecTypeAudio {
|
||||
wm.AudioSenderMux.Lock()
|
||||
if len(wm.AudioTransceiver) == 0 {
|
||||
wm.AudioTransceiver[target] = []*PeerSender{{ID: id, Transceiver: transceiver}}
|
||||
} else {
|
||||
wm.AudioTransceiver[target] = append(wm.AudioTransceiver[target], &PeerSender{ID: id, Transceiver: transceiver})
|
||||
}
|
||||
wm.AudioSenderMux.Unlock()
|
||||
} else if localTrack.Kind() == webrtc.RTPCodecTypeVideo {
|
||||
wm.VideoSenderMux.Lock()
|
||||
if len(wm.VideoTransceiver) == 0 {
|
||||
wm.VideoTransceiver[target] = []*PeerSender{{ID: id, Transceiver: transceiver}}
|
||||
} else {
|
||||
wm.VideoTransceiver[target] = append(wm.VideoTransceiver[target], &PeerSender{ID: id, Transceiver: transceiver})
|
||||
}
|
||||
wm.VideoSenderMux.Unlock()
|
||||
}
|
||||
go func() {
|
||||
<-time.NewTimer(time.Second).C
|
||||
connection.negotiate(id, squadId)
|
||||
}()
|
||||
}
|
||||
}
|
||||
wm.SquadMapMux.RUnlock()
|
||||
for {
|
||||
i, _, readErr := tr.Read(rtpbuf)
|
||||
if readErr != nil {
|
||||
logger.Println(readErr)
|
||||
break
|
||||
}
|
||||
f := atomic.LoadInt32(remote.rdv)
|
||||
if f == 0 {
|
||||
if _, writeErr := localTrack.Write(rtpbuf[:i]); writeErr != nil && !errors.Is(writeErr, io.ErrClosedPipe) {
|
||||
logger.Println(writeErr)
|
||||
break
|
||||
} else {
|
||||
_ = rtpbuf[:i]
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
peerConnection.OnICECandidate(func(i *webrtc.ICECandidate) {
|
||||
if i == nil {
|
||||
return
|
||||
}
|
||||
wm.CandidateMux.Lock()
|
||||
defer wm.CandidateMux.Unlock()
|
||||
desc := peerConnection.RemoteDescription()
|
||||
if desc == nil {
|
||||
logger.Println("generated candidate appended to list : ", i)
|
||||
wm.PendingCandidates[target] = append(wm.PendingCandidates[target], i)
|
||||
} else {
|
||||
logger.Println("generated candidate : ", i)
|
||||
if iceCandidateErr := cb(target, i); iceCandidateErr != nil {
|
||||
logger.Println(iceCandidateErr)
|
||||
}
|
||||
}
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (wm *WebRTCCallManager) HandleRennegotiationOffer(from string, sdp string) (err error) {
|
||||
wm.RTCPeerConnectionMapMux.Lock()
|
||||
defer wm.RTCPeerConnectionMapMux.Unlock()
|
||||
if _, ok := wm.RTCPeerConnections[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding peer connection for id %s", from)
|
||||
return
|
||||
}
|
||||
wm.RTCPeerConnections[from].makingOfferLock.Lock()
|
||||
if wm.RTCPeerConnections[from].makingOffer {
|
||||
wm.RTCPeerConnections[from].makingOfferLock.Unlock()
|
||||
return fmt.Errorf("already making an offer or state is stable")
|
||||
}
|
||||
wm.RTCPeerConnections[from].makingOfferLock.Unlock()
|
||||
if err = wm.RTCPeerConnections[from].SetRemoteDescription(webrtc.SessionDescription{SDP: sdp, Type: webrtc.SDPTypeOffer}); err != nil {
|
||||
return
|
||||
}
|
||||
localSd, err := wm.RTCPeerConnections[from].CreateAnswer(nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = wm.RTCPeerConnections[from].SetLocalDescription(localSd); err != nil {
|
||||
return
|
||||
}
|
||||
if err = wm.stream.Send(&Request{
|
||||
Type: string(HOSTED_SQUAD_WEBRTC_RENNEGOTIATION_ANSWER),
|
||||
From: wm.ID,
|
||||
Token: "",
|
||||
Payload: map[string]string{
|
||||
"to": from,
|
||||
"sdp": localSd.SDP,
|
||||
},
|
||||
}); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (wm *WebRTCCallManager) HandleRennegotiationAnswer(from string, sdp string) (err error) {
|
||||
wm.RTCPeerConnectionMapMux.Lock()
|
||||
defer wm.RTCPeerConnectionMapMux.Unlock()
|
||||
wm.RTCPeerConnections[from].makingOfferLock.Lock()
|
||||
if wm.RTCPeerConnections[from].makingOffer {
|
||||
wm.RTCPeerConnections[from].makingOfferLock.Unlock()
|
||||
return fmt.Errorf("already making an offer or state is stable")
|
||||
}
|
||||
wm.RTCPeerConnections[from].makingOfferLock.Unlock()
|
||||
if _, ok := wm.RTCPeerConnections[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding peer connection for id %s", from)
|
||||
return
|
||||
}
|
||||
err = wm.RTCPeerConnections[from].SetRemoteDescription(webrtc.SessionDescription{SDP: sdp, Type: webrtc.SDPTypeAnswer})
|
||||
return
|
||||
}
|
||||
|
||||
func (wm *WebRTCCallManager) AddCandidate(candidate *webrtc.ICECandidateInit, from string) (err error) {
|
||||
wm.RTCPeerConnectionMapMux.Lock()
|
||||
defer wm.RTCPeerConnectionMapMux.Unlock()
|
||||
if candidate != nil {
|
||||
err = wm.RTCPeerConnections[from].AddICECandidate(*candidate)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (wm *WebRTCCallManager) HandleLeavingMember(id string, squadId string) {
|
||||
wm.RTCPeerConnectionMapMux.Lock()
|
||||
if _, ok := wm.RTCPeerConnections[id]; !ok {
|
||||
wm.RTCPeerConnectionMapMux.Unlock()
|
||||
return
|
||||
}
|
||||
wm.RTCPeerConnectionMapMux.Unlock()
|
||||
defer func() {
|
||||
wm.RTCPeerConnectionMapMux.Lock()
|
||||
if _, ok := wm.RTCPeerConnections[id]; ok {
|
||||
if closeErr := wm.RTCPeerConnections[id].Close(); closeErr != nil {
|
||||
logger.Println("peer connection close error", closeErr)
|
||||
}
|
||||
}
|
||||
delete(wm.RTCPeerConnections, id)
|
||||
wm.RTCPeerConnectionMapMux.Unlock()
|
||||
}()
|
||||
logger.Printf("peer %s is leaving the squad\n", id)
|
||||
wm.DataChannelMapMux.Lock()
|
||||
if _, ok := wm.DataChannels[id]; ok {
|
||||
wm.DataChannels[id].DataChannel.Close()
|
||||
}
|
||||
delete(wm.DataChannels, id)
|
||||
wm.DataChannelMapMux.Unlock()
|
||||
wm.LocalSDMapMux.Lock()
|
||||
delete(wm.LocalSD, id)
|
||||
wm.LocalSDMapMux.Unlock()
|
||||
delete(wm.PendingCandidates, id)
|
||||
wm.RemoteTracksMux.Lock()
|
||||
delete(wm.RemoteTracks, id)
|
||||
wm.RemoteTracksMux.Unlock()
|
||||
wm.AudioSenderMux.Lock()
|
||||
for peerId, peerSender := range wm.AudioTransceiver {
|
||||
if peerId != id {
|
||||
logger.Println("senders", peerSender)
|
||||
c := 0
|
||||
for i, sender := range peerSender {
|
||||
if sender.ID == id {
|
||||
if senderErr := sender.Transceiver.Sender().Stop(); senderErr != nil {
|
||||
logger.Println(senderErr)
|
||||
}
|
||||
if transceiverErr := sender.Transceiver.Stop(); transceiverErr != nil {
|
||||
logger.Println("transceiverErr occured with video", transceiverErr)
|
||||
}
|
||||
peerSender[len(peerSender)-i-1], peerSender[i] = peerSender[i], peerSender[len(peerSender)-i-1]
|
||||
c++
|
||||
}
|
||||
}
|
||||
wm.AudioTransceiver[peerId] = wm.AudioTransceiver[peerId][:len(peerSender)-(c)]
|
||||
logger.Println(wm.AudioTransceiver[peerId])
|
||||
}
|
||||
}
|
||||
for _, transceiver := range wm.AudioTransceiver[id] {
|
||||
if senderErr := transceiver.Transceiver.Sender().Stop(); senderErr != nil {
|
||||
logger.Println(senderErr)
|
||||
}
|
||||
if stopErr := transceiver.Transceiver.Stop(); stopErr != nil {
|
||||
logger.Println("transceiver audio stop error", stopErr)
|
||||
}
|
||||
}
|
||||
delete(wm.AudioTransceiver, id)
|
||||
wm.AudioSenderMux.Unlock()
|
||||
wm.VideoSenderMux.Lock()
|
||||
for peerId, peerSender := range wm.VideoTransceiver {
|
||||
if peerId != id {
|
||||
c := 0
|
||||
logger.Println("senders", peerSender)
|
||||
for i, sender := range peerSender {
|
||||
if sender.ID == id {
|
||||
if senderErr := sender.Transceiver.Sender().Stop(); senderErr != nil {
|
||||
logger.Println(senderErr)
|
||||
}
|
||||
if transceiverErr := sender.Transceiver.Stop(); transceiverErr != nil {
|
||||
logger.Println("transceiverErr occured with video", transceiverErr)
|
||||
}
|
||||
peerSender[len(peerSender)-i-1], peerSender[i] = peerSender[i], peerSender[len(peerSender)-i-1]
|
||||
c++
|
||||
}
|
||||
}
|
||||
wm.VideoTransceiver[peerId] = wm.VideoTransceiver[peerId][:len(peerSender)-(c)]
|
||||
logger.Println(wm.VideoTransceiver[peerId])
|
||||
}
|
||||
}
|
||||
for _, transceiver := range wm.VideoTransceiver[id] {
|
||||
if senderErr := transceiver.Transceiver.Sender().Stop(); senderErr != nil {
|
||||
logger.Println(senderErr)
|
||||
}
|
||||
if stopErr := transceiver.Transceiver.Stop(); stopErr != nil {
|
||||
logger.Println("transceiver video stop error", stopErr)
|
||||
}
|
||||
}
|
||||
delete(wm.VideoTransceiver, id)
|
||||
wm.VideoSenderMux.Unlock()
|
||||
if _, ok := wm.Squads[squadId]; ok {
|
||||
wm.SquadMapMux.Lock()
|
||||
var index int
|
||||
for i, v := range wm.Squads[squadId].Members {
|
||||
if v == id {
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(wm.Squads[squadId].Members) < 2 {
|
||||
wm.Squads[squadId].Members = []string{}
|
||||
} else {
|
||||
wm.Squads[squadId].Members = append(wm.Squads[squadId].Members[:index], wm.Squads[squadId].Members[index+1:]...)
|
||||
}
|
||||
wm.SquadMapMux.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (wm *WebRTCCallManager) negotiate(target string, squadId string) {
|
||||
wm.RTCPeerConnectionMapMux.Lock()
|
||||
defer wm.RTCPeerConnectionMapMux.Unlock()
|
||||
if _, ok := wm.RTCPeerConnections[target]; !ok {
|
||||
return
|
||||
}
|
||||
wm.RTCPeerConnections[target].makingOfferLock.Lock()
|
||||
wm.RTCPeerConnections[target].makingOffer = true
|
||||
wm.RTCPeerConnections[target].makingOfferLock.Unlock()
|
||||
defer func() {
|
||||
wm.RTCPeerConnections[target].makingOfferLock.Lock()
|
||||
wm.RTCPeerConnections[target].makingOffer = false
|
||||
wm.RTCPeerConnections[target].makingOfferLock.Unlock()
|
||||
}()
|
||||
wm.SquadMapMux.RLock()
|
||||
defer wm.SquadMapMux.RUnlock()
|
||||
for _, id := range wm.Squads[squadId].Members {
|
||||
if _, ok := wm.RTCPeerConnections[id]; !ok {
|
||||
continue
|
||||
}
|
||||
connection := wm.RTCPeerConnections[id]
|
||||
if connection.SignalingState() == webrtc.SignalingStateStable {
|
||||
localSd, err := connection.CreateOffer(nil)
|
||||
if err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if err = connection.SetLocalDescription(localSd); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if err = wm.stream.Send(&Request{
|
||||
Type: string(HOSTED_SQUAD_WEBRTC_RENNEGOTIATION_OFFER),
|
||||
From: wm.ID,
|
||||
Token: "",
|
||||
Payload: map[string]string{
|
||||
"to": id,
|
||||
"sdp": localSd.SDP,
|
||||
},
|
||||
}); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
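
// negotiate marks a connection with makingOffer before re-offering, and the two
// HandleRennegotiation* handlers reject glare by checking that flag. A sketch of how
// an incoming renegotiation payload could be routed (routeRenegotiation is illustrative;
// the "from" and "sdp" keys mirror the other signalling handlers in this file):
func routeRenegotiation(wm *WebRTCCallManager, req map[string]string, isAnswer bool) error {
	if isAnswer {
		return wm.HandleRennegotiationAnswer(req[FROM], req[SDP])
	}
	return wm.HandleRennegotiationOffer(req[FROM], req[SDP])
}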
|
||||
234
webrtcCallSoundManager.go
Normal file
@ -0,0 +1,234 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
SPEAKING = "speaking"
|
||||
STOP_SPEAKING = "stop_speaking"
|
||||
MUTE = "mute"
|
||||
UNMUTE = "unmute"
|
||||
)
|
||||
|
||||
type WebrtcCallSoundManager struct{}
|
||||
|
||||
func NewWebrtcCallSoundManager() *WebrtcCallSoundManager {
|
||||
return new(WebrtcCallSoundManager)
|
||||
}
|
||||
|
||||
func (w *WebrtcCallSoundManager) HandleCallEvent(from string, squadId string, eventId string, payload map[string]interface{}, data []byte, manager *WebRTCCallManager) (err error) {
|
||||
done, errCh := make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
logger.Println("got an event in call sound manager", from, eventId, payload)
|
||||
switch eventId {
|
||||
case UNMUTE:
|
||||
if e := w.unmute(from, squadId, manager); e != nil {
|
||||
errCh <- e
|
||||
return
|
||||
}
|
||||
case MUTE:
|
||||
if e := w.mute(from, squadId, manager); e != nil {
|
||||
errCh <- e
|
||||
return
|
||||
}
|
||||
case SPEAKING:
|
||||
if e := w.sendSpeakingEvent(from, squadId, manager); e != nil {
|
||||
errCh <- e
|
||||
return
|
||||
}
|
||||
case STOP_SPEAKING:
|
||||
if e := w.sendStopSpeakingEvent(from, squadId, manager); e != nil {
|
||||
errCh <- e
|
||||
return
|
||||
}
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
return nil
|
||||
case err = <-errCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WebrtcCallSoundManager) unmute(peerId string, squadId string, manager *WebRTCCallManager) (err error) {
|
||||
if _, ok := manager.Squads[squadId]; !ok {
|
||||
err = fmt.Errorf("no correponding squad found")
|
||||
return
|
||||
}
|
||||
logger.Println("sending unnmute event", peerId)
|
||||
manager.RemoteTracksMux.RLock()
|
||||
for _, v := range manager.RemoteTracks[peerId] {
|
||||
if v.Track.Kind() == webrtc.RTPCodecTypeAudio {
|
||||
atomic.SwapInt32(v.rdv, 0)
|
||||
}
|
||||
}
|
||||
manager.RemoteTracksMux.RUnlock()
|
||||
manager.SquadMapMux.Lock()
|
||||
defer manager.SquadMapMux.Unlock()
|
||||
for _, member := range manager.Squads[squadId].Members {
|
||||
manager.DataChannelMapMux.Lock()
|
||||
if _, ok := manager.DataChannels[member]; ok && member != peerId {
|
||||
bs, marshalErr := json.Marshal(map[string]interface{}{
|
||||
"type": UNMUTE,
|
||||
"from": peerId,
|
||||
"payload": map[string]interface{}{},
|
||||
})
|
||||
if marshalErr != nil {
|
||||
logger.Println(marshalErr)
|
||||
manager.DataChannelMapMux.Unlock()
|
||||
continue
|
||||
}
|
||||
lock:
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(manager.DataChannels[member].l, 0, 1) {
|
||||
defer atomic.SwapInt32(manager.DataChannels[member].l, 0)
|
||||
if sendErr := manager.DataChannels[member].DataChannel.SendText(string(bs)); sendErr != nil {
|
||||
logger.Println(sendErr)
|
||||
}
|
||||
break lock
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
manager.DataChannelMapMux.Unlock()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *WebrtcCallSoundManager) mute(peerId string, squadId string, manager *WebRTCCallManager) (err error) {
|
||||
if _, ok := manager.Squads[squadId]; !ok {
|
||||
err = fmt.Errorf("no correponding squad found")
|
||||
return
|
||||
}
|
||||
logger.Println("sending mute event", peerId)
|
||||
manager.RemoteTracksMux.RLock()
|
||||
for _, v := range manager.RemoteTracks[peerId] {
|
||||
if v.Track.Kind() == webrtc.RTPCodecTypeAudio {
|
||||
atomic.SwapInt32(v.rdv, 1)
|
||||
}
|
||||
}
|
||||
manager.RemoteTracksMux.RUnlock()
|
||||
manager.SquadMapMux.Lock()
|
||||
defer manager.SquadMapMux.Unlock()
|
||||
for _, member := range manager.Squads[squadId].Members {
|
||||
manager.DataChannelMapMux.Lock()
|
||||
if _, ok := manager.DataChannels[member]; ok && member != peerId {
|
||||
bs, marshalErr := json.Marshal(map[string]interface{}{
|
||||
"type": MUTE,
|
||||
"from": peerId,
|
||||
"payload": map[string]interface{}{},
|
||||
})
|
||||
if marshalErr != nil {
|
||||
logger.Println(marshalErr)
|
||||
manager.DataChannelMapMux.Unlock()
|
||||
continue
|
||||
}
|
||||
lock:
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(manager.DataChannels[member].l, 0, 1) {
|
||||
defer atomic.SwapInt32(manager.DataChannels[member].l, 0)
|
||||
if sendErr := manager.DataChannels[member].DataChannel.SendText(string(bs)); sendErr != nil {
|
||||
logger.Println(sendErr)
|
||||
}
|
||||
break lock
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
manager.DataChannelMapMux.Unlock()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *WebrtcCallSoundManager) sendSpeakingEvent(peerId string, squadId string, manager *WebRTCCallManager) (err error) {
|
||||
if _, ok := manager.Squads[squadId]; !ok {
|
||||
err = fmt.Errorf("no correponding squad found")
|
||||
return
|
||||
}
|
||||
logger.Println("sending speaking event", peerId)
|
||||
manager.SquadMapMux.Lock()
|
||||
defer manager.SquadMapMux.Unlock()
|
||||
for _, member := range manager.Squads[squadId].Members {
|
||||
manager.DataChannelMapMux.Lock()
|
||||
if _, ok := manager.DataChannels[member]; ok && member != peerId {
|
||||
bs, marshalErr := json.Marshal(map[string]interface{}{
|
||||
"type": SPEAKING,
|
||||
"from": peerId,
|
||||
"payload": map[string]interface{}{
|
||||
"userId": peerId,
|
||||
"speaking": true,
|
||||
},
|
||||
})
|
||||
if marshalErr != nil {
|
||||
logger.Println(marshalErr)
|
||||
manager.DataChannelMapMux.Unlock()
|
||||
continue
|
||||
}
|
||||
lock:
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(manager.DataChannels[member].l, 0, 1) {
|
||||
defer atomic.SwapInt32(manager.DataChannels[member].l, 0)
|
||||
if sendErr := manager.DataChannels[member].DataChannel.SendText(string(bs)); sendErr != nil {
|
||||
logger.Println(sendErr)
|
||||
}
|
||||
break lock
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
manager.DataChannelMapMux.Unlock()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *WebrtcCallSoundManager) sendStopSpeakingEvent(peerId string, squadId string, manager *WebRTCCallManager) (err error) {
|
||||
if _, ok := manager.Squads[squadId]; !ok {
|
||||
err = fmt.Errorf("no correponding squad found")
|
||||
return
|
||||
}
|
||||
logger.Println("sending stop speaking event", peerId)
|
||||
manager.SquadMapMux.Lock()
|
||||
defer manager.SquadMapMux.Unlock()
|
||||
for _, member := range manager.Squads[squadId].Members {
|
||||
manager.DataChannelMapMux.Lock()
|
||||
if _, ok := manager.DataChannels[member]; ok && member != peerId {
|
||||
bs, marshalErr := json.Marshal(map[string]interface{}{
|
||||
"type": STOP_SPEAKING,
|
||||
"from": peerId,
|
||||
"payload": map[string]interface{}{
|
||||
"userId": peerId,
|
||||
"speaking": false,
|
||||
},
|
||||
})
|
||||
if marshalErr != nil {
|
||||
logger.Println(marshalErr)
|
||||
manager.DataChannelMapMux.Unlock()
|
||||
continue
|
||||
}
|
||||
lock:
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(manager.DataChannels[member].l, 0, 1) {
|
||||
defer atomic.SwapInt32(manager.DataChannels[member].l, 0)
|
||||
if sendErr := manager.DataChannels[member].DataChannel.SendText(string(bs)); sendErr != nil {
|
||||
logger.Println(sendErr)
|
||||
}
|
||||
break lock
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
manager.DataChannelMapMux.Unlock()
|
||||
}
|
||||
return
|
||||
}
|
||||
136
webrtcCallVideoManager.go
Normal file
@ -0,0 +1,136 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
VIDEO = "video"
|
||||
STOP_VIDEO = "stop_video"
|
||||
)
|
||||
|
||||
type WebrtcCallVideoManager struct{}
|
||||
|
||||
func NewWebrtcCallVideoManager() *WebrtcCallVideoManager {
|
||||
return new(WebrtcCallVideoManager)
|
||||
}
|
||||
|
||||
func (w *WebrtcCallVideoManager) HandleCallEvent(from string, squadId string, eventId string, payload map[string]interface{}, data []byte, manager *WebRTCCallManager) (err error) {
|
||||
done, errCh := make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
logger.Println("got an event in call video manager", from, eventId, payload)
|
||||
switch eventId {
|
||||
case VIDEO:
|
||||
if e := w.video(from, squadId, manager); e != nil {
|
||||
errCh <- e
|
||||
return
|
||||
}
|
||||
case STOP_VIDEO:
|
||||
if e := w.stopVideo(from, squadId, manager); e != nil {
|
||||
errCh <- e
|
||||
return
|
||||
}
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
return nil
|
||||
case err = <-errCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WebrtcCallVideoManager) video(peerId string, squadId string, manager *WebRTCCallManager) (err error) {
|
||||
if _, ok := manager.Squads[squadId]; !ok {
|
||||
err = fmt.Errorf("no correponding squad found")
|
||||
return
|
||||
}
|
||||
logger.Println("sending video event", peerId)
|
||||
manager.RemoteTracksMux.RLock()
|
||||
for _, v := range manager.RemoteTracks[peerId] {
|
||||
if v.Track.Kind() == webrtc.RTPCodecTypeVideo {
|
||||
atomic.SwapInt32(v.rdv, 0)
|
||||
}
|
||||
}
|
||||
manager.RemoteTracksMux.RUnlock()
|
||||
manager.SquadMapMux.Lock()
|
||||
defer manager.SquadMapMux.Unlock()
|
||||
for _, member := range manager.Squads[squadId].Members {
|
||||
manager.DataChannelMapMux.Lock()
|
||||
if _, ok := manager.DataChannels[member]; ok && member != peerId {
|
||||
bs, marshalErr := json.Marshal(map[string]interface{}{
|
||||
"type": VIDEO,
|
||||
"from": peerId,
|
||||
"payload": map[string]interface{}{},
|
||||
})
|
||||
if marshalErr != nil {
|
||||
logger.Println(marshalErr)
|
||||
manager.DataChannelMapMux.Unlock()
|
||||
continue
|
||||
}
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(manager.DataChannels[member].l, 0, 1) {
|
||||
defer atomic.SwapInt32(manager.DataChannels[member].l, 0)
|
||||
if sendErr := manager.DataChannels[member].DataChannel.SendText(string(bs)); sendErr != nil {
|
||||
logger.Println(sendErr)
|
||||
}
|
||||
break
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
manager.DataChannelMapMux.Unlock()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *WebrtcCallVideoManager) stopVideo(peerId string, squadId string, manager *WebRTCCallManager) (err error) {
|
||||
if _, ok := manager.Squads[squadId]; !ok {
|
||||
err = fmt.Errorf("no correponding squad found")
|
||||
return
|
||||
}
|
||||
logger.Println("sending stop video event", peerId)
|
||||
manager.RemoteTracksMux.RLock()
|
||||
for _, v := range manager.RemoteTracks[peerId] {
|
||||
if v.Track.Kind() == webrtc.RTPCodecTypeVideo {
|
||||
atomic.SwapInt32(v.rdv, 1)
|
||||
}
|
||||
}
|
||||
manager.RemoteTracksMux.RUnlock()
|
||||
manager.SquadMapMux.Lock()
|
||||
defer manager.SquadMapMux.Unlock()
|
||||
for _, member := range manager.Squads[squadId].Members {
|
||||
manager.DataChannelMapMux.Lock()
|
||||
if _, ok := manager.DataChannels[member]; ok && member != peerId {
|
||||
bs, marshalErr := json.Marshal(map[string]interface{}{
|
||||
"type": STOP_VIDEO,
|
||||
"from": peerId,
|
||||
"payload": map[string]interface{}{},
|
||||
})
|
||||
if marshalErr != nil {
|
||||
logger.Println(marshalErr)
|
||||
manager.DataChannelMapMux.Unlock()
|
||||
continue
|
||||
}
|
||||
for {
|
||||
if atomic.CompareAndSwapInt32(manager.DataChannels[member].l, 0, 1) {
|
||||
defer atomic.SwapInt32(manager.DataChannels[member].l, 0)
|
||||
if sendErr := manager.DataChannels[member].DataChannel.SendText(string(bs)); sendErr != nil {
|
||||
logger.Println(sendErr)
|
||||
}
|
||||
break
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
manager.DataChannelMapMux.Unlock()
|
||||
}
|
||||
return
|
||||
}
|
||||
441
webrtcFsManager.go
Normal file
@ -0,0 +1,441 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
context "context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
sync "sync"
|
||||
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
type WebrtcFsManager struct {
|
||||
stream GrpcManager_LinkClient
|
||||
DatachannelManager DataChannelManager
|
||||
LocalSD map[string]*webrtc.SessionDescription
|
||||
RTCPeerConnections map[string]*webrtc.PeerConnection
|
||||
DataChannels map[string]*webrtc.DataChannel
|
||||
PendingCandidates map[string][]*webrtc.ICECandidate
|
||||
CandidateChannel chan *IncomingCandidate
|
||||
CandidateMux *sync.RWMutex
|
||||
RTCPeerConnectionMapMux *sync.RWMutex
|
||||
DataChannelMapMux *sync.RWMutex
|
||||
LocalSDMapMux *sync.RWMutex
|
||||
}
|
||||
|
||||
func NewWebrtcFsManager(dataChannelManager DataChannelManager) (webrtcFsManager *WebrtcFsManager, err error) {
|
||||
webrtcFsManager = &WebrtcFsManager{
|
||||
DatachannelManager: dataChannelManager,
|
||||
DataChannels: make(map[string]*webrtc.DataChannel),
|
||||
PendingCandidates: make(map[string][]*webrtc.ICECandidate),
|
||||
LocalSD: make(map[string]*webrtc.SessionDescription),
|
||||
RTCPeerConnections: make(map[string]*webrtc.PeerConnection),
|
||||
RTCPeerConnectionMapMux: &sync.RWMutex{},
|
||||
LocalSDMapMux: &sync.RWMutex{},
|
||||
CandidateMux: &sync.RWMutex{},
|
||||
DataChannelMapMux: &sync.RWMutex{},
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (wf *WebrtcFsManager) CreateOffer(ctx context.Context, target string, from string, cb OnICECandidateFunc) (err error) {
|
||||
peerConnection, err := wf.createPeerConnection(target, from, webrtc.SDPTypeOffer, cb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
rawOffer, err := peerConnection.CreateOffer(nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = peerConnection.SetLocalDescription(rawOffer); err != nil {
|
||||
return
|
||||
}
|
||||
wf.RTCPeerConnectionMapMux.Lock()
|
||||
logger.Println("adding for target", target)
|
||||
wf.RTCPeerConnections[target] = peerConnection
|
||||
wf.RTCPeerConnectionMapMux.Unlock()
|
||||
err = wf.stream.Send(&Request{
|
||||
Type: string(WEBRTC_OFFER_FS),
|
||||
From: "lolo_local_serv",
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"to": target,
|
||||
"from": "lolo_local_serv",
|
||||
"sdp": rawOffer.SDP,
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (wf *WebrtcFsManager) HandleOffer(ctx context.Context, req map[string]string, cb OnICECandidateFunc) (err error) {
|
||||
done, errCh := make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
peerConnection, err := wf.createPeerConnection(req[FROM], req[TO], webrtc.SDPTypeAnswer, cb)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
wf.RTCPeerConnectionMapMux.Lock()
|
||||
wf.RTCPeerConnections[req[FROM]] = peerConnection
|
||||
wf.RTCPeerConnectionMapMux.Unlock()
|
||||
offer := webrtc.SessionDescription{
|
||||
Type: webrtc.SDPTypeOffer,
|
||||
SDP: req[SDP],
|
||||
}
|
||||
if err = peerConnection.SetRemoteDescription(offer); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
rawAnswer, err := peerConnection.CreateAnswer(nil)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
wf.LocalSDMapMux.Lock()
|
||||
wf.LocalSD[req[FROM]] = &rawAnswer
|
||||
wf.LocalSDMapMux.Unlock()
|
||||
if err = wf.stream.Send(&Request{
|
||||
Type: string(WEBRTC_ANSWER_FS),
|
||||
From: "lolo_local_serv",
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"to": req[FROM],
|
||||
"from": "lolo_local_serv",
|
||||
"sdp": rawAnswer.SDP,
|
||||
},
|
||||
}); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case err = <-errCh:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (wf *WebrtcFsManager) HandleAnswer(ctx context.Context, req map[string]string) (err error) {
|
||||
wf.RTCPeerConnectionMapMux.RLock()
|
||||
defer wf.RTCPeerConnectionMapMux.RUnlock()
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logger.Printf("recover from panic : %v\n", r)
|
||||
}
|
||||
}()
|
||||
if _, ok := wf.RTCPeerConnections[req[FROM]]; !ok {
|
||||
err = fmt.Errorf("no corresponding peer connection for id : %s", req[FROM])
|
||||
return
|
||||
}
|
||||
peerConnection := wf.RTCPeerConnections[req[FROM]]
if err = peerConnection.SetRemoteDescription(webrtc.SessionDescription{
|
||||
Type: webrtc.SDPTypeAnswer,
|
||||
SDP: req[SDP],
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
if err = wf.stream.Send(&Request{
|
||||
Type: string(WEBRTC_COUNTER_OFFER_FS),
|
||||
From: "lolo_local_serv",
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"from": "lolo_local_serv",
|
||||
"to": req[FROM],
|
||||
},
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
wf.CandidateMux.RLock()
|
||||
for _, candidate := range wf.PendingCandidates[req[FROM]] {
|
||||
logger.Println("sending candidate to", req[FROM])
|
||||
if err = wf.stream.Send(&Request{
|
||||
Type: string(WEBRTC_CANDIDATE_FS),
|
||||
From: "lolo_local_serv",
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"from": "lolo_local_serv",
|
||||
"to": req[FROM],
|
||||
"candidate": candidate.ToJSON().Candidate,
|
||||
"sdpMid": *candidate.ToJSON().SDPMid,
|
||||
"sdpMlineIndex": strconv.Itoa(int(*candidate.ToJSON().SDPMLineIndex)),
|
||||
},
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
wf.CandidateMux.RUnlock()
|
||||
wf.CandidateMux.Lock()
|
||||
delete(wf.PendingCandidates, req[FROM])
|
||||
wf.CandidateMux.Unlock()
|
||||
wf.LocalSDMapMux.Lock()
|
||||
delete(wf.LocalSD, req[FROM])
|
||||
wf.LocalSDMapMux.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (wf *WebrtcFsManager) HandleCounterOffer(ctx context.Context, req map[string]string) (err error) {
|
||||
wf.RTCPeerConnectionMapMux.RLock()
|
||||
if _, ok := wf.RTCPeerConnections[req[FROM]]; !ok {
|
||||
err = fmt.Errorf("no field corresponding peer connection for id %s", req[FROM])
|
||||
return
|
||||
}
|
||||
connection := wf.RTCPeerConnections[req[FROM]]
|
||||
wf.RTCPeerConnectionMapMux.RUnlock()
|
||||
wf.LocalSDMapMux.RLock()
|
||||
if err = connection.SetLocalDescription(*wf.LocalSD[req[FROM]]); err != nil {
|
||||
wf.LocalSDMapMux.RUnlock()
return
|
||||
}
|
||||
wf.LocalSDMapMux.RUnlock()
|
||||
wf.CandidateMux.RLock()
|
||||
for _, candidate := range wf.PendingCandidates[req[FROM]] {
|
||||
logger.Println("sending candidate to", req[FROM])
|
||||
if err = wf.stream.Send(&Request{
|
||||
Type: string(WEBRTC_CANDIDATE_FS),
|
||||
From: "lolo_local_serv",
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"from": "lolo_local_serv",
|
||||
"to": req[FROM],
|
||||
"candidate": candidate.ToJSON().Candidate,
|
||||
"sdpMid": *candidate.ToJSON().SDPMid,
|
||||
"sdpMlineIndex": strconv.Itoa(int(*candidate.ToJSON().SDPMLineIndex)),
|
||||
},
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
wf.CandidateMux.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (wf *WebrtcFsManager) createPeerConnection(target string, from string, peerType webrtc.SDPType, cb OnICECandidateFunc) (peerConnection *webrtc.PeerConnection, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logger.Printf("recover from panic : %v\n", r)
|
||||
}
|
||||
}()
|
||||
s := webrtc.SettingEngine{}
|
||||
s.DetachDataChannels()
|
||||
|
||||
api := webrtc.NewAPI(webrtc.WithSettingEngine(s))
|
||||
|
||||
config := webrtc.Configuration{
|
||||
ICEServers: []webrtc.ICEServer{
|
||||
{
|
||||
URLs: []string{"stun:stun.l.google.com:19302", "stun:stunserver.org:3478", "stun:stun.l.google.com:19302?transport=tcp"},
|
||||
},
|
||||
},
|
||||
SDPSemantics: webrtc.SDPSemanticsUnifiedPlan,
|
||||
}
|
||||
peerConnection, err = api.NewPeerConnection(config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if peerType == webrtc.SDPTypeOffer {
|
||||
maxRetransmits := uint16(100)
|
||||
channel, err := peerConnection.CreateDataChannel("data", &webrtc.DataChannelInit{
|
||||
MaxRetransmits: &maxRetransmits,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
channel.OnOpen(func() {
|
||||
logger.Println("channel opened")
|
||||
if chanErr := channel.SendText("yooo man this is open"); chanErr != nil {
|
||||
logger.Println(chanErr)
|
||||
}
|
||||
})
|
||||
channel.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
logger.Printf("new message %s\n", string(msg.Data))
|
||||
done, errCh := wf.DatachannelManager.HandleMessage(&DatachannelMessage{
|
||||
From: target,
|
||||
Type: "test",
|
||||
Payload: &DatachannelMessagePayload{},
|
||||
}, channel)
|
||||
select {
|
||||
case <-done:
|
||||
//logger.Println("done with success")
|
||||
case e := <-errCh:
|
||||
logger.Println(e)
|
||||
logger.Println("this is impossible")
|
||||
}
|
||||
})
|
||||
wf.DataChannelMapMux.Lock()
|
||||
wf.DataChannels[target] = channel
|
||||
wf.DataChannelMapMux.Unlock()
|
||||
}
|
||||
peerConnection.OnICEConnectionStateChange(func(is webrtc.ICEConnectionState) {
|
||||
logger.Printf("ICE connection state has changed %s\n", is.String())
|
||||
if is == webrtc.ICEConnectionStateDisconnected || is == webrtc.ICEConnectionStateFailed {
|
||||
logger.Println(is)
|
||||
}
|
||||
})
|
||||
peerConnection.OnDataChannel(func(dc *webrtc.DataChannel) {
|
||||
dc.OnOpen(func() {
|
||||
logger.Printf("got a new open datachannel %s\n", dc.Label())
|
||||
dataChann, err := dc.Detach()
|
||||
if err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
for {
|
||||
var x []byte = make([]byte, 2<<15)
|
||||
n, _, err := dataChann.ReadDataChannel(x)
|
||||
if err != nil {
|
||||
logger.Println(err)
|
||||
if err == io.EOF {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
go func(msg []byte) {
|
||||
var dataChannelMessage DatachannelMessage
|
||||
if unmarshalErr := json.Unmarshal(msg, &dataChannelMessage); unmarshalErr != nil {
|
||||
logger.Println(unmarshalErr)
|
||||
return
|
||||
}
|
||||
done, errCh := wf.DatachannelManager.HandleMessage(&DatachannelMessage{
|
||||
From: dataChannelMessage.From,
|
||||
Type: dataChannelMessage.Type,
|
||||
Payload: dataChannelMessage.Payload,
|
||||
}, dc)
|
||||
select {
|
||||
case <-done:
|
||||
//logger.Println("done with success")
|
||||
case e := <-errCh:
|
||||
logger.Println(e)
|
||||
logger.Println("this is impossible")
|
||||
}
|
||||
}(x[:n])
|
||||
}
|
||||
})
|
||||
// dc.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
// var dataChannelMessage DatachannelMessage
|
||||
// if unmarshalErr := json.Unmarshal(msg.Data, &dataChannelMessage); unmarshalErr != nil {
|
||||
// logger.Println(unmarshalErr)
|
||||
// return
|
||||
// }
|
||||
// done, errCh := wf.DatachannelManager.HandleMessage(&DatachannelMessage{
|
||||
// From: dataChannelMessage.From,
|
||||
// Type: dataChannelMessage.Type,
|
||||
// Payload: dataChannelMessage.Payload,
|
||||
// }, dc)
|
||||
// select {
|
||||
// case <-done:
|
||||
// //logger.Println("done with success")
|
||||
// case e := <-errCh:
|
||||
// logger.Println(e)
|
||||
// logger.Println("this is impossible")
|
||||
// }
|
||||
// })
|
||||
})
|
||||
peerConnection.OnICECandidate(func(i *webrtc.ICECandidate) {
|
||||
if i == nil {
|
||||
return
|
||||
}
|
||||
wf.CandidateMux.Lock()
|
||||
defer wf.CandidateMux.Unlock()
|
||||
desc := peerConnection.RemoteDescription()
|
||||
if desc == nil {
|
||||
wf.PendingCandidates[target] = append(wf.PendingCandidates[target], i)
|
||||
} else {
|
||||
logger.Println(i)
|
||||
if iceCandidateErr := cb(target, i); iceCandidateErr != nil {
|
||||
logger.Println(iceCandidateErr)
|
||||
}
|
||||
}
|
||||
})
|
||||
peerConnection.OnNegotiationNeeded(func() {
|
||||
if peerConnection.SignalingState() != webrtc.SignalingStateHaveLocalOffer && peerConnection.SignalingState() != webrtc.SignalingStateHaveRemoteOffer {
|
||||
wf.RTCPeerConnectionMapMux.Lock()
|
||||
defer wf.RTCPeerConnectionMapMux.Unlock()
|
||||
for id, connection := range wf.RTCPeerConnections {
|
||||
if connection.SignalingState() != webrtc.SignalingStateHaveLocalOffer && connection.SignalingState() != webrtc.SignalingStateHaveRemoteOffer {
|
||||
localSd, err := connection.CreateOffer(nil)
|
||||
if err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if err = connection.SetLocalDescription(localSd); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if err = wf.stream.Send(&Request{
|
||||
Type: string(WEBRTC_RENNEGOTIATION_OFFER_FS),
|
||||
From: "lolo_local_serv",
|
||||
Token: "",
|
||||
Payload: map[string]string{
|
||||
"to": id,
|
||||
"sdp": localSd.SDP,
|
||||
},
|
||||
}); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
return
|
||||
}
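The offer-initiating counterpart (CreateOffer) is not part of this excerpt; the standalone sketch below shows the usual offer-side flow that createPeerConnection would be driven by: open a data channel so the SDP has an application section, create the offer, then hand offer.SDP to the signalling layer. The STUN URL and the deferred SetLocalDescription follow the conventions used elsewhere in this file, but the body is illustrative only.

package main

import (
	"fmt"

	"github.com/pion/webrtc/v3"
)

func main() {
	pc, err := webrtc.NewPeerConnection(webrtc.Configuration{
		ICEServers: []webrtc.ICEServer{{URLs: []string{"stun:stun.l.google.com:19302"}}},
	})
	if err != nil {
		panic(err)
	}

	// the offering side creates a data channel first so the offer carries an application m-line
	if _, err := pc.CreateDataChannel("data", nil); err != nil {
		panic(err)
	}

	offer, err := pc.CreateOffer(nil)
	if err != nil {
		panic(err)
	}
	// store the offer (this package keeps it in LocalSD) and send offer.SDP to the remote
	// peer over the signalling channel; SetLocalDescription happens later, when the
	// counter-offer handler above runs
	fmt.Println(offer.SDP)
}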
|
||||
|
||||
func (wf *WebrtcFsManager) HandleRennegotiationOffer(from string, dst string, sdp string) (err error) {
|
||||
wf.RTCPeerConnectionMapMux.Lock()
|
||||
defer wf.RTCPeerConnectionMapMux.Unlock()
|
||||
if _, ok := wf.RTCPeerConnections[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding peer connection for id %s", from)
|
||||
return
|
||||
}
|
||||
if wf.RTCPeerConnections[from].SignalingState() != webrtc.SignalingStateStable {
|
||||
err = fmt.Errorf("rennego called in wrong state")
|
||||
return
|
||||
}
|
||||
if err = wf.RTCPeerConnections[from].SetRemoteDescription(webrtc.SessionDescription{SDP: sdp, Type: webrtc.SDPTypeOffer}); err != nil {
|
||||
return
|
||||
}
|
||||
localSd, err := wf.RTCPeerConnections[from].CreateAnswer(nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = wf.RTCPeerConnections[from].SetLocalDescription(localSd); err != nil {
|
||||
return
|
||||
}
|
||||
if err = wf.stream.Send(&Request{
|
||||
Type: string(WEBRTC_RENNEGOTIATION_ANSWER_FS),
|
||||
From: "lolo_local_serv",
|
||||
Token: "",
|
||||
Payload: map[string]string{
|
||||
"to": from,
|
||||
"sdp": localSd.SDP,
|
||||
},
|
||||
}); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (wf *WebrtcFsManager) HandleRennegotiationAnswer(from string, dst string, sdp string) (err error) {
|
||||
wf.RTCPeerConnectionMapMux.Lock()
|
||||
defer wf.RTCPeerConnectionMapMux.Unlock()
|
||||
if _, ok := wf.RTCPeerConnections[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding peer connection for id %s", from)
|
||||
return
|
||||
}
|
||||
err = wf.RTCPeerConnections[from].SetRemoteDescription(webrtc.SessionDescription{SDP: sdp, Type: webrtc.SDPTypeAnswer})
|
||||
return
|
||||
}
|
||||
|
||||
func (wf *WebrtcFsManager) AddCandidate(candidate *webrtc.ICECandidateInit, from string) (err error) {
wf.RTCPeerConnectionMapMux.Lock()
defer wf.RTCPeerConnectionMapMux.Unlock()
// guard against an unknown peer id so a missing map entry does not cause a nil pointer dereference
connection, ok := wf.RTCPeerConnections[from]
if !ok {
err = fmt.Errorf("no corresponding peer connection for id %s", from)
return
}
err = connection.AddICECandidate(*candidate)
return
}
|
||||
158
webrtcFsMiddleware.go
Normal file
@ -0,0 +1,158 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
context "context"
|
||||
"strconv"
|
||||
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
INCOMING_PEER_FS ReqType = "incoming_peer_fs"
|
||||
LEAVING_PEER_FS ReqType = "leaving_peer_fs"
|
||||
WEBRTC_OFFER_FS ReqType = "offer_fs"
|
||||
WEBRTC_ANSWER_FS ReqType = "answer_fs"
|
||||
WEBRTC_RENNEGOTIATION_OFFER_FS ReqType = "rennegotiation_offer_fs"
|
||||
WEBRTC_RENNEGOTIATION_ANSWER_FS ReqType = "rennegotiation_answer_fs"
|
||||
WEBRTC_COUNTER_OFFER_FS ReqType = "webrtc_counter_offer_fs"
|
||||
WEBRTC_CANDIDATE_FS ReqType = "webrtc_candidate_fs"
|
||||
)
|
||||
|
||||
type WebRTCFsMiddleware struct {
|
||||
Manager *WebrtcFsManager
|
||||
stream GrpcManager_LinkClient
|
||||
}
|
||||
|
||||
func NewWebRTCFsMiddleware(manager *WebrtcFsManager) (webrtcFsMiddleware *WebRTCFsMiddleware) {
|
||||
webrtcFsMiddleware = &WebRTCFsMiddleware{
|
||||
Manager: manager,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (wfm *WebRTCFsMiddleware) signalCandidate(to string, candidate *webrtc.ICECandidate) (err error) {
|
||||
err = wfm.stream.Send(&Request{
|
||||
Type: string(WEBRTC_CANDIDATE_FS),
|
||||
From: "lolo_local_serv",
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"from": "lolo_local_serv",
|
||||
"to": to,
|
||||
"candidate": candidate.ToJSON().Candidate,
|
||||
"sdpMid": *candidate.ToJSON().SDPMid,
|
||||
"sdpMlineIndex": strconv.Itoa(int(*candidate.ToJSON().SDPMLineIndex)),
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (wfm *WebRTCFsMiddleware) Process(ctx context.Context, req *Response, stream GrpcManager_LinkClient) (err error) {
|
||||
// buffer both channels so the worker goroutine cannot block on a send after Process has already returned
done, errCh := make(chan struct{}, 1), make(chan error, 1)
|
||||
go func() {
|
||||
switch req.Type {
|
||||
case string(INCOMING_PEER_FS):
logger.Println("quit squad called")
if from, ok := req.Payload[FROM]; ok {
logger.Println(from)
//wfm.Manager.HandleLeavingMember(from)
}
// signal completion on every path, not only when the payload carries a "from" field
done <- struct{}{}
|
||||
case string(PEER_CONNECTION_REQUEST):
|
||||
if err := validateRequest(req.Payload, FROM, TO); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
// if err := wfm.Manager.CreateOffer(ctx, req.Payload[FROM], req.Payload[TO], wfm.signalCandidate); err != nil {
|
||||
// errCh <- err
|
||||
// return
|
||||
// }
|
||||
done <- struct{}{}
|
||||
case string(WEBRTC_OFFER_FS):
|
||||
if err := validateRequest(req.GetPayload(), FROM, TO, SDP); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := wfm.Manager.HandleOffer(ctx, req.GetPayload(), wfm.signalCandidate); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
done <- struct{}{}
|
||||
case string(WEBRTC_ANSWER_FS):
|
||||
if err := validateRequest(req.GetPayload(), FROM, TO, SDP); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := wfm.Manager.HandleAnswer(ctx, req.GetPayload()); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
done <- struct{}{}
|
||||
case string(WEBRTC_COUNTER_OFFER_FS):
|
||||
if err := validateRequest(req.GetPayload(), FROM); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := wfm.Manager.HandleCounterOffer(ctx, req.Payload); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
done <- struct{}{}
|
||||
case string(WEBRTC_RENNEGOTIATION_ANSWER_FS):
|
||||
if err := validateRequest(req.GetPayload(), FROM, SDP); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := wfm.Manager.HandleRennegotiationAnswer(req.Payload[FROM], "lolo_local_serv", req.Payload[SDP]); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
done <- struct{}{}
|
||||
case string(WEBRTC_RENNEGOTIATION_OFFER_FS):
|
||||
if err := validateRequest(req.GetPayload(), FROM, SDP); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := wfm.Manager.HandleRennegotiationOffer(req.Payload[FROM], "", req.Payload[SDP]); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
done <- struct{}{}
|
||||
case string(WEBRTC_CANDIDATE_FS):
|
||||
if err := validateRequest(req.GetPayload(), FROM, "candidate", "sdpMlineIndex", "sdpMid"); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
logger.Println(req.Payload)
|
||||
i, err := strconv.Atoi(req.Payload["sdpMlineIndex"])
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
sdpMlineIndex := uint16(i)
|
||||
sdpMid := req.Payload["sdpMid"]
|
||||
logger.Println(sdpMid, sdpMlineIndex)
|
||||
if err := wfm.Manager.AddCandidate(&webrtc.ICECandidateInit{
|
||||
Candidate: req.Payload["candidate"],
|
||||
SDPMid: &sdpMid,
|
||||
SDPMLineIndex: &sdpMlineIndex,
|
||||
}, req.Payload[FROM]); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
done <- struct{}{}
|
||||
default:
|
||||
logger.Println("fs is correctly linked")
|
||||
done <- struct{}{}
|
||||
}
// each case above already signals done or errCh exactly once; an unconditional
// second send here would never be received and would leak this goroutine
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
return
|
||||
case <-done:
|
||||
return
|
||||
case err = <-errCh:
|
||||
return
|
||||
}
|
||||
}
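Process above fans the request handling out to a goroutine and then selects on ctx.Done, done and errCh. A stripped-down sketch of that pattern, with buffered channels so the worker can never block once the caller has returned (the handle function is a hypothetical stand-in for the switch):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func process(ctx context.Context, handle func() error) error {
	// capacity 1 lets the worker finish even if the caller already gave up via ctx
	done, errCh := make(chan struct{}, 1), make(chan error, 1)
	go func() {
		if err := handle(); err != nil {
			errCh <- err
			return
		}
		done <- struct{}{}
	}()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-done:
		return nil
	case err := <-errCh:
		return err
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(process(ctx, func() error { return nil }))
	fmt.Println(process(ctx, func() error { return errors.New("boom") }))
}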
|
||||
195
webrtcGrpcMiddleware.go
Normal file
@ -0,0 +1,195 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
type GrpcRequestType string
|
||||
|
||||
const (
|
||||
PEER_CONNECTION_REQUEST GrpcRequestType = "peer_connection_request"
|
||||
)
|
||||
|
||||
const (
|
||||
OFFER ReqType = "offer"
|
||||
ANSWER ReqType = "answer"
|
||||
COUNTER_OFFER ReqType = "webrtc_counter_offer"
|
||||
JOIN_HOSTED_SQUAD ReqType = "join_hosted_squad"
|
||||
HOSTED_SQUAD_ACCESS_DENIED ReqType = "hosted_squad_access_denied"
|
||||
HOSTED_SQUAD_STOP_CALL ReqType = "hosted_squad_stop_call"
|
||||
HOSTED_SQUAD_ACCESS_GRANTED ReqType = "hosted_squad_access_granted"
|
||||
LEAVE_HOSTED_SQUAD ReqType = "leave_hosted_squad"
|
||||
INCOMING_MEMBER_HOSTED ReqType = "incoming_member_hosted"
|
||||
LEAVING_MEMBER_HOSTED ReqType = "leaving_member_hosted"
|
||||
HOSTED_SQUAD_WEBRTC_OFFER ReqType = "hosted_squad_offer"
|
||||
HOSTED_SQUAD_WEBRTC_ANSWER ReqType = "hosted_squad_answer"
|
||||
HOSTED_SQUAD_WEBRTC_RENNEGOTIATION_OFFER ReqType = "hosted_squad_rennegotiation_offer"
|
||||
HOSTED_SQUAD_WEBRTC_RENNEGOTIATION_ANSWER ReqType = "hosted_squad_rennegotiation_answer"
|
||||
HOSTED_SQUAD_WEBRTC_COUNTER_OFFER ReqType = "hosted_squad_webrtc_counter_offer"
|
||||
HOSTED_SQUAD_WEBRTC_CANDIDATE ReqType = "hosted_squad_webrtc_candidate"
|
||||
HOSTED_SQUAD_REMOVE_VIDEO ReqType = "hosted_squad_remove_video"
|
||||
GET_HOSTED_SQUAD_TRACKS ReqType = "hosted_squad_get_tracks"
|
||||
NEW_HOSTED_SQUAD = "new_hosted_squad"
|
||||
)
|
||||
|
||||
type WebRTCGrpcMiddleware struct {
|
||||
Manager *WebRTCCallManager
|
||||
stream GrpcManager_LinkClient
|
||||
}
|
||||
|
||||
func NewWebRTCGrpcMiddleware(manager *WebRTCCallManager) (webrtcGrpcMiddleware *WebRTCGrpcMiddleware) {
|
||||
webrtcGrpcMiddleware = &WebRTCGrpcMiddleware{
|
||||
Manager: manager,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func validateRequest(req map[string]string, entries ...string) (err error) {
|
||||
for _, entry := range entries {
|
||||
if _, ok := req[entry]; !ok {
|
||||
err = fmt.Errorf("no field %s in req payload", entry)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
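A small usage example for validateRequest: the payload keys are plain map lookups, so a missing field is reported before any handler dereferences it. The payload values below are made up for illustration.

// illustrative helper inside package localserver; FROM, TO and SDP are the
// payload key constants already defined in this package
func exampleValidate() {
	payload := map[string]string{
		FROM: "peer-123",
		TO:   "lolo_local_serv",
	}
	if err := validateRequest(payload, FROM, TO, SDP); err != nil {
		// err names the first missing key, SDP in this case
		logger.Println(err)
	}
}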
|
||||
|
||||
func (wgm *WebRTCGrpcMiddleware) signalCandidate(to string, candidate *webrtc.ICECandidate) (err error) {
|
||||
err = wgm.stream.Send(&Request{
|
||||
Type: string(HOSTED_SQUAD_WEBRTC_CANDIDATE),
|
||||
From: wgm.Manager.ID,
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"from": wgm.Manager.ID,
|
||||
"to": to,
|
||||
"candidate": candidate.ToJSON().Candidate,
|
||||
"sdpMid": *candidate.ToJSON().SDPMid,
|
||||
"sdpMlineIndex": strconv.Itoa(int(*candidate.ToJSON().SDPMLineIndex)),
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (wgm *WebRTCGrpcMiddleware) Process(ctx context.Context, req *Response, stream GrpcManager_LinkClient) (err error) {
|
||||
// buffered channels: the deferred done send and an errCh send can both complete even if Process has already returned
done, errCh := make(chan struct{}, 1), make(chan error, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
done <- struct{}{}
|
||||
}()
|
||||
switch req.Type {
|
||||
case NEW_HOSTED_SQUAD:
|
||||
if err := validateRequest(req.Payload, "ID"); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
logger.Println("new squad incoming")
|
||||
wgm.Manager.SquadMapMux.Lock()
|
||||
wgm.Manager.Squads[req.Payload["ID"]] = &Squad{
|
||||
ID: req.Payload["ID"],
|
||||
Members: []string{},
|
||||
}
|
||||
wgm.Manager.SquadMapMux.Unlock()
|
||||
case string(HOSTED_SQUAD_STOP_CALL):
|
||||
logger.Println("quit squad called")
|
||||
if err := validateRequest(req.Payload, FROM, "squadId"); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
wgm.Manager.HandleLeavingMember(req.Payload[FROM], req.Payload["squadId"])
// completion is already signalled by the deferred done send at the top of this goroutine
|
||||
case string(PEER_CONNECTION_REQUEST):
|
||||
if err := validateRequest(req.Payload, FROM, TO); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
logger.Println("creating offer for peer")
|
||||
if err := wgm.Manager.CreateOffer(ctx, req.Payload[FROM], req.Payload[TO], wgm.signalCandidate); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(HOSTED_SQUAD_WEBRTC_OFFER):
|
||||
if err := validateRequest(req.GetPayload(), FROM, TO, SDP, SQUAD_ID); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := wgm.Manager.HandleOffer(ctx, req.GetPayload(), wgm.signalCandidate); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(HOSTED_SQUAD_WEBRTC_ANSWER):
|
||||
if err := validateRequest(req.GetPayload(), FROM, TO, SDP); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := wgm.Manager.HandleAnswer(ctx, req.GetPayload()); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(HOSTED_SQUAD_WEBRTC_COUNTER_OFFER):
|
||||
if err := validateRequest(req.GetPayload(), FROM); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := wgm.Manager.HandleCounterOffer(ctx, req.Payload); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(HOSTED_SQUAD_WEBRTC_RENNEGOTIATION_ANSWER):
|
||||
logger.Println("received negotiation answer")
|
||||
if err := validateRequest(req.GetPayload(), FROM, SDP); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := wgm.Manager.HandleRennegotiationAnswer(req.Payload[FROM], req.Payload[SDP]); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(HOSTED_SQUAD_WEBRTC_RENNEGOTIATION_OFFER):
|
||||
logger.Println("received negotiation offer")
|
||||
if err := validateRequest(req.GetPayload(), FROM, SDP); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := wgm.Manager.HandleRennegotiationOffer(req.Payload[FROM], req.Payload[SDP]); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(HOSTED_SQUAD_WEBRTC_CANDIDATE):
|
||||
if err := validateRequest(req.GetPayload(), FROM, "candidate", "sdpMlineIndex", "sdpMid"); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
logger.Println(req.Payload)
|
||||
i, err := strconv.Atoi(req.Payload["sdpMlineIndex"])
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
sdpMlineIndex := uint16(i)
|
||||
sdpMid := req.Payload["sdpMid"]
|
||||
logger.Println(sdpMid, sdpMlineIndex)
|
||||
if err := wgm.Manager.AddCandidate(&webrtc.ICECandidateInit{
|
||||
Candidate: req.Payload["candidate"],
|
||||
SDPMid: &sdpMid,
|
||||
SDPMLineIndex: &sdpMlineIndex,
|
||||
}, req.Payload[FROM]); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
default:
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
return
|
||||
case <-done:
|
||||
return
|
||||
case err = <-errCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
66
webrtcHttpMiddleware.go
Normal file
@ -0,0 +1,66 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type (
|
||||
WebRTCHttpMiddleware struct {
|
||||
//menuItem *systray.MenuItem
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
CREATE_HOSTED_SQUAD = "create_hosted_squad"
|
||||
)
|
||||
|
||||
// func NewWebRTCHttpMiddleware(menuItem *systray.MenuItem) (webRTCHttpMiddleware *WebRTCHttpMiddleware) {
|
||||
// webRTCHttpMiddleware = &WebRTCHttpMiddleware{
|
||||
// menuItem: menuItem,
|
||||
// }
|
||||
// return
|
||||
// }
|
||||
|
||||
func (wm *WebRTCHttpMiddleware) Process(ctx context.Context, req *http.Request) (err error) {
|
||||
// buffer both channels so the worker goroutine cannot block on a send after Process has returned
done, errCh := make(chan struct{}, 1), make(chan error, 1)
|
||||
go func() {
|
||||
localServerReq, err := wm.unmarshallBody(req)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
switch localServerReq.ReqType {
|
||||
case CREATE_HOSTED_SQUAD:
|
||||
|
||||
default:
}
// signal completion so Process does not block until the context is cancelled
done <- struct{}{}
}()
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case err = <-errCh:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (wm *WebRTCHttpMiddleware) unmarshallBody(req *http.Request) (localServerReq *LocalServerRequest, err error) {
|
||||
reqBody, err := req.GetBody()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
bs, err := io.ReadAll(reqBody)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = json.Unmarshal(bs, &localServerReq)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
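One caveat worth flagging: http.Request.GetBody is normally populated only on outgoing client requests, so on a request received by a server it is usually nil and unmarshallBody would fail. A hedged alternative sketch that streams the incoming body directly; LocalServerRequest is assumed to be the same type used above.

// sketch of a server-side variant; it decodes req.Body instead of relying on GetBody
func decodeLocalServerRequest(req *http.Request) (*LocalServerRequest, error) {
	defer req.Body.Close()
	var localServerReq LocalServerRequest
	if err := json.NewDecoder(req.Body).Decode(&localServerReq); err != nil {
		return nil, err
	}
	return &localServerReq, nil
}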
|
||||
759
zoneAudioChannel.go
Normal file
@ -0,0 +1,759 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
sync "sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/pion/rtcp"
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
AUDIO_CHANNEL_ACCESS_DENIED ReqType = "audio_channel_access_denied"
|
||||
AUDIO_CHANNEL_STOP_CALL ReqType = "audio_channel_stop_call"
|
||||
AUDIO_CHANNEL_ACCESS_GRANTED ReqType = "audio_channel_access_granted"
|
||||
AUDIO_CHANNEL_WEBRTC_OFFER ReqType = "audio_channel_offer"
|
||||
AUDIO_CHANNEL_WEBRTC_ANSWER ReqType = "audio_channel_answer"
|
||||
AUDIO_CHANNEL_WEBRTC_RENNEGOTIATION_OFFER ReqType = "audio_channel_rennegotiation_offer"
|
||||
AUDIO_CHANNEL_WEBRTC_RENNEGOTIATION_ANSWER ReqType = "audio_channel_rennegotiation_answer"
|
||||
AUDIO_CHANNEL_WEBRTC_COUNTER_OFFER ReqType = "audio_channel_webrtc_counter_offer"
|
||||
AUDIO_CHANNEL_WEBRTC_CANDIDATE ReqType = "audio_channel_webrtc_candidate"
|
||||
AUDIO_CHANNEL_REMOVE_VIDEO ReqType = "audio_channel_remove_video"
|
||||
GET_AUDIO_CHANNEL_TRACKS ReqType = "audio_channel_get_tracks"
|
||||
)
|
||||
|
||||
const (
|
||||
AUDIO_CHANNEL_USER_MUTE = "audio_channel_user_mute"
|
||||
AUDIO_CHANNEL_USER_UNMUTE = "audio_channel_user_unmute"
|
||||
AUDIO_CHANNEL_USER_SPEAKING = "audio_channel_user_speaking"
|
||||
AUDIO_CHANNEL_USER_STOPPED_SPEAKING = "audio_channel_user_stopped_speaking"
|
||||
)
|
||||
|
||||
type ZoneRTCPeerConnection struct {
|
||||
*webrtc.PeerConnection
|
||||
makingOffer bool
|
||||
makingOfferLock *sync.Mutex
|
||||
negotiate func(string, SendDCMessageFunc)
|
||||
}
|
||||
|
||||
type AudioChannel struct {
|
||||
ID string `json:"id"`
|
||||
Owner string `json:"owner"`
|
||||
ChannelType string `json:"channelType"`
|
||||
Members []string `json:"members"`
|
||||
CurrentMembersId []string `json:"currentMembersId"`
|
||||
CurrentMembers map[string]*AudioChannelMember `json:"currentMembers"`
|
||||
localSD map[string]*webrtc.SessionDescription `json:"-"`
|
||||
rtcPeerConnections map[string]*ZoneRTCPeerConnection `json:"-"`
|
||||
audioTransceiver map[string][]*PeerSender `json:"-"`
|
||||
audiChannelsDataChannels map[string]*DataChannel `json:"-"`
|
||||
pendingCandidates map[string][]*webrtc.ICECandidate `json:"-"`
|
||||
remoteTracks map[string][]*RemoteTrack `json:"-"`
|
||||
middlewares []interface{} `json:"-"`
|
||||
candidateFlag *uint32 `json:"-"`
|
||||
remoteTracksFlag *uint32 `json:"-"`
|
||||
rtcPeerConnectionMapFlag *uint32 `json:"-"`
|
||||
dataChannelMapFlag *uint32 `json:"-"`
|
||||
localSDMapFlag *uint32 `json:"-"`
|
||||
audioSenderFlag *uint32 `json:"-"`
|
||||
}
|
||||
|
||||
type AudioChannelOnICECandidateFunc = func(string, string, *webrtc.ICECandidate) error
|
||||
|
||||
func NewAudioChannel(id string, owner string, channelType string, members []string, currentMembersId []string, currentMembers map[string]*AudioChannelMember) (audioChannel *AudioChannel) {
|
||||
candidateFlag := uint32(0)
|
||||
remoteTracksFlag := uint32(0)
|
||||
rtcPeerConnectionMapFlag := uint32(0)
|
||||
dataChannelMapFlag := uint32(0)
|
||||
localSDMapFlag := uint32(0)
|
||||
audioSenderFlag := uint32(0)
|
||||
audioChannel = &AudioChannel{
|
||||
ID: id,
|
||||
Owner: owner,
|
||||
ChannelType: channelType,
|
||||
Members: members,
|
||||
CurrentMembersId: currentMembersId,
|
||||
CurrentMembers: currentMembers,
|
||||
localSD: make(map[string]*webrtc.SessionDescription),
|
||||
rtcPeerConnections: make(map[string]*ZoneRTCPeerConnection),
|
||||
audioTransceiver: make(map[string][]*PeerSender),
|
||||
audiChannelsDataChannels: make(map[string]*DataChannel),
|
||||
pendingCandidates: make(map[string][]*webrtc.ICECandidate),
|
||||
remoteTracks: make(map[string][]*RemoteTrack),
|
||||
middlewares: make([]interface{}, 0),
|
||||
candidateFlag: &candidateFlag,
|
||||
remoteTracksFlag: &remoteTracksFlag,
|
||||
rtcPeerConnectionMapFlag: &rtcPeerConnectionMapFlag,
|
||||
dataChannelMapFlag: &dataChannelMapFlag,
|
||||
localSDMapFlag: &localSDMapFlag,
|
||||
audioSenderFlag: &audioSenderFlag,
|
||||
}
|
||||
return
|
||||
}
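atomicallyExecute is used throughout this file as a tiny spin-lock around the maps guarded by the *uint32 flags, but its definition is not part of this excerpt. A minimal sketch of what such a helper could look like, assuming the signature implied by the call sites (flag pointer plus a closure returning error); it would need the sync/atomic and runtime imports.

// hypothetical implementation, inferred from how the flags are used above
func atomicallyExecuteSketch(flag *uint32, f func() error) error {
	// spin until the flag flips from 0 (free) to 1 (held)
	for !atomic.CompareAndSwapUint32(flag, 0, 1) {
		runtime.Gosched()
	}
	// always release the flag, even if f panics
	defer atomic.StoreUint32(flag, 0)
	return f()
}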
|
||||
|
||||
func (ac *AudioChannel) HandleOffer(ctx context.Context, channelId string, userId string, sdp string, hostId string, sendDCMessage SendDCMessageFunc, cb AudioChannelOnICECandidateFunc) (done chan struct{}, errCh chan error) {
|
||||
done, errCh = make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
peerConnection, err := ac.createPeerConnection(userId, ac.ID, webrtc.SDPTypeAnswer, cb, sendDCMessage)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(ac.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
ac.rtcPeerConnections[userId] = &ZoneRTCPeerConnection{
|
||||
PeerConnection: peerConnection,
|
||||
makingOffer: false,
|
||||
makingOfferLock: &sync.Mutex{},
|
||||
negotiate: ac.negotiate,
|
||||
}
|
||||
return
|
||||
})
|
||||
offer := webrtc.SessionDescription{
|
||||
Type: webrtc.SDPTypeOffer,
|
||||
SDP: sdp,
|
||||
}
|
||||
if err = peerConnection.SetRemoteDescription(offer); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
rawAnswer, err := peerConnection.CreateAnswer(nil)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(ac.localSDMapFlag, func() (err error) {
|
||||
ac.localSD[userId] = &rawAnswer
|
||||
return
|
||||
})
|
||||
_, _ = sendDCMessage(string(AUDIO_CHANNEL_WEBRTC_ANSWER), hostId, userId, map[string]interface{}{
|
||||
"to": userId,
|
||||
"from": ac.ID,
|
||||
"channelId": channelId,
|
||||
"sdp": rawAnswer.SDP,
|
||||
})
|
||||
done <- struct{}{}
|
||||
logger.Println("handle offer done")
|
||||
}()
|
||||
return
|
||||
}
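HandleOffer answers over a SendDCMessageFunc rather than the gRPC stream; that function type is defined outside this excerpt. A hedged sketch of one possible implementation, assuming the four-argument signature and the done/err channel pair seen at the call sites: it wraps the payload in the ZoneResponse envelope used later in this file and writes it to a data channel. The to argument is left to the caller's routing layer in this sketch.

// illustrative only; the real SendDCMessageFunc is defined outside this excerpt
func makeSendDCMessage(dc *webrtc.DataChannel) func(string, string, string, map[string]interface{}) (chan struct{}, chan error) {
	return func(eventType, from, to string, payload map[string]interface{}) (chan struct{}, chan error) {
		done, errCh := make(chan struct{}, 1), make(chan error, 1)
		go func() {
			bs, err := json.Marshal(&ZoneResponse{Type: eventType, From: from, Payload: payload})
			if err != nil {
				errCh <- err
				return
			}
			if err := dc.SendText(string(bs)); err != nil {
				errCh <- err
				return
			}
			done <- struct{}{}
		}()
		return done, errCh
	}
}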
|
||||
|
||||
func (ac *AudioChannel) HandleCounterOffer(ctx context.Context, userId string, sendDCMessage SendDCMessageFunc) (err error) {
|
||||
if err = atomicallyExecute(ac.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := ac.rtcPeerConnections[userId]; !ok {
|
||||
err = fmt.Errorf("no field corresponding peer connection for id %s", userId)
|
||||
return
|
||||
}
|
||||
logger.Println("handling counter offer")
|
||||
connection := ac.rtcPeerConnections[userId]
|
||||
err = atomicallyExecute(ac.localSDMapFlag, func() (err error) {
|
||||
err = connection.SetLocalDescription(*ac.localSD[userId])
|
||||
return
|
||||
})
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(ac.localSDMapFlag, func() (err error) {
|
||||
delete(ac.localSD, userId)
|
||||
return
|
||||
})
|
||||
if err = atomicallyExecute(ac.candidateFlag, func() (err error) {
|
||||
for _, candidate := range ac.pendingCandidates[userId] {
|
||||
logger.Println("sending candidate to", userId, candidate)
|
||||
d, e := sendDCMessage(string(AUDIO_CHANNEL_WEBRTC_CANDIDATE), "", userId, map[string]interface{}{
|
||||
"from": ac.ID,
|
||||
"to": userId,
|
||||
"candidate": candidate.ToJSON().Candidate,
|
||||
"sdpMid": *candidate.ToJSON().SDPMid,
|
||||
"sdpMlineIndex": strconv.Itoa(int(*candidate.ToJSON().SDPMLineIndex)),
|
||||
})
|
||||
select {
|
||||
case <-d:
|
||||
case err = <-e:
|
||||
return
|
||||
}
|
||||
}
|
||||
delete(ac.pendingCandidates, userId)
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (ac *AudioChannel) HandleRennegotiationOffer(from string, sdp string, sendDCMessage SendDCMessageFunc) (err error) {
|
||||
err = atomicallyExecute(ac.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := ac.rtcPeerConnections[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding peer connection for id %s", from)
|
||||
return
|
||||
}
|
||||
ac.rtcPeerConnections[from].makingOfferLock.Lock()
|
||||
if ac.rtcPeerConnections[from].makingOffer {
|
||||
ac.rtcPeerConnections[from].makingOfferLock.Unlock()
|
||||
return fmt.Errorf("already making an offer or state is stable")
|
||||
}
|
||||
ac.rtcPeerConnections[from].makingOfferLock.Unlock()
|
||||
if err = ac.rtcPeerConnections[from].SetRemoteDescription(webrtc.SessionDescription{SDP: sdp, Type: webrtc.SDPTypeOffer}); err != nil {
|
||||
return
|
||||
}
|
||||
localSd, err := ac.rtcPeerConnections[from].CreateAnswer(nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = ac.rtcPeerConnections[from].SetLocalDescription(localSd); err != nil {
|
||||
return
|
||||
}
|
||||
d, e := sendDCMessage(string(AUDIO_CHANNEL_WEBRTC_RENNEGOTIATION_ANSWER), ac.ID, from, map[string]interface{}{
|
||||
"from": ac.ID,
|
||||
"to": from,
|
||||
"sdp": localSd.SDP,
|
||||
})
|
||||
select {
|
||||
case <-d:
|
||||
case err = <-e:
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (ac *AudioChannel) HandleRennegotiationAnswer(from string, sdp string) (err error) {
logger.Println("handling renegotiation answer")
err = atomicallyExecute(ac.rtcPeerConnectionMapFlag, func() (err error) {
// check that the peer connection exists before touching its fields,
// otherwise a missing map entry causes a nil pointer dereference
connection, ok := ac.rtcPeerConnections[from]
if !ok {
err = fmt.Errorf("no corresponding peer connection for id %s", from)
return
}
connection.makingOfferLock.Lock()
if connection.makingOffer {
connection.makingOfferLock.Unlock()
return fmt.Errorf("already making an offer or state is stable")
}
connection.makingOfferLock.Unlock()
err = connection.SetRemoteDescription(webrtc.SessionDescription{SDP: sdp, Type: webrtc.SDPTypeAnswer})
return
})
return
}
|
||||
|
||||
func (ac *AudioChannel) AddCandidate(candidate *webrtc.ICECandidateInit, from string) (err error) {
|
||||
logger.Println("adding ice candidate", candidate)
|
||||
err = atomicallyExecute(ac.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := ac.rtcPeerConnections[from]; ok && candidate != nil {
|
||||
err = ac.rtcPeerConnections[from].AddICECandidate(*candidate)
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (ac *AudioChannel) createPeerConnection(target string, from string, peerType webrtc.SDPType, cb AudioChannelOnICECandidateFunc, sendDCMessage SendDCMessageFunc) (peerConnection *webrtc.PeerConnection, err error) {
defer func() {
// check the recovered value, not err, so a panic is actually detected and logged
if r := recover(); r != nil {
logger.Printf("recovered from panic: %v\n", r)
}
}()
|
||||
config := webrtc.Configuration{
|
||||
ICEServers: []webrtc.ICEServer{
|
||||
{
|
||||
URLs: []string{"stun:stun.l.google.com:19302", "stun:stunserver.org:3478"},
|
||||
},
|
||||
},
|
||||
SDPSemantics: webrtc.SDPSemanticsUnifiedPlanWithFallback,
|
||||
}
|
||||
|
||||
peerConnection, err = webrtc.NewPeerConnection(config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
logger.Println("---------------------------------------------------")
|
||||
if peerType == webrtc.SDPTypeAnswer {
|
||||
maxRetransmits := uint16(100)
|
||||
channel, err := peerConnection.CreateDataChannel("data", &webrtc.DataChannelInit{
|
||||
MaxRetransmits: &maxRetransmits,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
channel.OnOpen(func() {
|
||||
logger.Println("channel opened")
|
||||
if chanErr := channel.SendText("yooo man this is open"); chanErr != nil {
|
||||
logger.Println(chanErr)
|
||||
}
|
||||
})
|
||||
channel.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
var event CallEvent
|
||||
if err := json.Unmarshal(msg.Data, &event); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if e := ac.HandleDataChannelEvents(event.From, event.EventId, event.Payload); e != nil {
|
||||
logger.Println("*-------------- datachannel error: ", e)
|
||||
}
|
||||
})
|
||||
logger.Println("new channel for target : ", target)
|
||||
channel.SetBufferedAmountLowThreshold(bufferedAmountLowThreshold)
|
||||
channel.OnBufferedAmountLow(func() {
|
||||
|
||||
})
|
||||
_ = atomicallyExecute(ac.dataChannelMapFlag, func() (err error) {
|
||||
logger.Println(target)
|
||||
l := int32(0)
|
||||
ac.audiChannelsDataChannels[target] = &DataChannel{
|
||||
DataChannel: channel,
|
||||
l: &l,
|
||||
}
|
||||
return
|
||||
})
|
||||
} else {
|
||||
peerConnection.OnDataChannel(func(dc *webrtc.DataChannel) {
|
||||
_ = atomicallyExecute(ac.dataChannelMapFlag, func() (err error) {
|
||||
l := int32(0)
|
||||
ac.audiChannelsDataChannels[target] = &DataChannel{
|
||||
DataChannel: dc,
|
||||
l: &l,
|
||||
}
|
||||
return
|
||||
})
|
||||
dc.OnOpen(func() {
|
||||
logger.Printf("got a new open datachannel %s\n", dc.Label())
|
||||
})
|
||||
dc.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
var event CallEvent
|
||||
if err := json.Unmarshal(msg.Data, &event); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if e := ac.HandleDataChannelEvents(event.From, event.EventId, event.Payload); e != nil {
|
||||
logger.Println("*-------------- datachannel error: ", e)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
err = atomicallyExecute(ac.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
err = atomicallyExecute(ac.remoteTracksFlag, func() (err error) {
|
||||
logger.Println("------------------", ac.CurrentMembersId)
|
||||
for _, id := range ac.CurrentMembersId {
|
||||
logger.Println(id)
|
||||
if id != target {
|
||||
if _, ok := ac.remoteTracks[id]; !ok {
|
||||
continue
|
||||
}
|
||||
for _, track := range ac.remoteTracks[id] {
|
||||
transceiver, err := peerConnection.AddTransceiverFromKind(track.Track.Kind(), webrtc.RTPTransceiverInit{Direction: webrtc.RTPTransceiverDirectionSendonly})
|
||||
if err != nil {
|
||||
logger.Println("add track error")
|
||||
continue
|
||||
}
|
||||
if err := transceiver.Sender().ReplaceTrack(track.Track); err != nil {
|
||||
logger.Println("add track error")
|
||||
continue
|
||||
}
|
||||
|
||||
_ = atomicallyExecute(ac.audioSenderFlag, func() (err error) {
|
||||
if len(ac.audioTransceiver) == 0 {
|
||||
ac.audioTransceiver[id] = []*PeerSender{{ID: target, Transceiver: transceiver}}
|
||||
} else {
|
||||
ac.audioTransceiver[id] = append(ac.audioTransceiver[id], &PeerSender{ID: target, Transceiver: transceiver})
|
||||
}
|
||||
return
|
||||
})
|
||||
logger.Println("track added", track)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
})
|
||||
peerConnection.OnConnectionStateChange(func(pcs webrtc.PeerConnectionState) {
|
||||
if pcs == webrtc.PeerConnectionStateClosed || pcs == webrtc.PeerConnectionStateDisconnected || pcs == webrtc.PeerConnectionStateFailed {
|
||||
logger.Println(pcs)
|
||||
//ac.HandleLeavingMember(target, squadId)
|
||||
}
|
||||
})
|
||||
peerConnection.OnICEConnectionStateChange(func(is webrtc.ICEConnectionState) {
|
||||
logger.Printf("ICE connection state has changed %s\n", is.String())
|
||||
if is == webrtc.ICEConnectionStateDisconnected || is == webrtc.ICEConnectionStateFailed {
|
||||
logger.Println(is)
|
||||
}
|
||||
})
|
||||
peerConnection.OnTrack(func(tr *webrtc.TrackRemote, r *webrtc.RTPReceiver) {
|
||||
logger.Println("got new track")
|
||||
defer func() {
|
||||
if stopErr := r.Stop(); stopErr != nil {
|
||||
logger.Println(stopErr)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
ticker := time.NewTicker(1500 * time.Millisecond)
|
||||
for range ticker.C {
|
||||
if rtcpSendErr := peerConnection.WriteRTCP([]rtcp.Packet{&rtcp.PictureLossIndication{MediaSSRC: uint32(tr.SSRC())}}); rtcpSendErr != nil {
|
||||
logger.Println(rtcpSendErr)
|
||||
break
|
||||
}
|
||||
}
|
||||
}()
|
||||
uniqId := uuid.New()
|
||||
i := fmt.Sprintf("%s/%s", target, uniqId.String())
|
||||
logger.Println("*************************----------------", i, "-----------------------***************")
|
||||
localTrack, newTrackErr := webrtc.NewTrackLocalStaticRTP(tr.Codec().RTPCodecCapability, i, i)
|
||||
if newTrackErr != nil {
|
||||
return
|
||||
}
|
||||
logger.Println(localTrack)
|
||||
rtpbuf := make([]byte, 1400)
|
||||
flag := int32(0)
|
||||
remote := &RemoteTrack{ID: target, Track: localTrack, rdv: &flag}
|
||||
_ = atomicallyExecute(ac.remoteTracksFlag, func() (err error) {
|
||||
if len(ac.remoteTracks[target]) == 0 {
|
||||
ac.remoteTracks[target] = []*RemoteTrack{remote}
|
||||
} else {
|
||||
ac.remoteTracks[target] = append(ac.remoteTracks[target], remote)
|
||||
}
|
||||
index := len(ac.remoteTracks[target])
|
||||
logger.Println(index, ac.remoteTracks)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(ac.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
for _, id := range ac.CurrentMembersId {
|
||||
if id != target {
|
||||
if _, ok := ac.rtcPeerConnections[id]; !ok {
|
||||
continue
|
||||
}
|
||||
connection := ac.rtcPeerConnections[id]
|
||||
transceiver, tranceiverErr := connection.AddTransceiverFromKind(localTrack.Kind(), webrtc.RTPTransceiverInit{Direction: webrtc.RTPTransceiverDirectionSendonly})
|
||||
if tranceiverErr != nil {
|
||||
logger.Println(tranceiverErr)
|
||||
continue
|
||||
}
|
||||
if replaceTrackErr := transceiver.Sender().ReplaceTrack(localTrack); replaceTrackErr != nil {
|
||||
logger.Println(replaceTrackErr)
|
||||
continue
|
||||
}
|
||||
go func() {
|
||||
rtcpBuf := make([]byte, 1500)
|
||||
for {
|
||||
if _, _, rtcpErr := transceiver.Sender().Read(rtcpBuf); rtcpErr != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
if localTrack.Kind() == webrtc.RTPCodecTypeAudio {
|
||||
_ = atomicallyExecute(ac.audioSenderFlag, func() (err error) {
|
||||
if len(ac.audioTransceiver) == 0 {
|
||||
ac.audioTransceiver[target] = []*PeerSender{{ID: id, Transceiver: transceiver}}
|
||||
} else {
|
||||
ac.audioTransceiver[target] = append(ac.audioTransceiver[target], &PeerSender{ID: id, Transceiver: transceiver})
|
||||
}
|
||||
return
|
||||
})
|
||||
} else if localTrack.Kind() == webrtc.RTPCodecTypeVideo {
|
||||
logger.Println("track of wrong type")
|
||||
}
|
||||
go func() {
|
||||
<-time.After(time.Millisecond * 500)
|
||||
connection.negotiate(id, sendDCMessage)
|
||||
}()
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
d := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
i, _, readErr := tr.Read(rtpbuf)
|
||||
if readErr != nil {
|
||||
logger.Println(readErr)
|
||||
break
|
||||
}
|
||||
//logger.Println(rtpbuf[:i])
|
||||
f := atomic.LoadInt32(remote.rdv)
|
||||
if f == 0 {
|
||||
if _, writeErr := localTrack.Write(rtpbuf[:i]); writeErr != nil && !errors.Is(writeErr, io.ErrClosedPipe) {
|
||||
logger.Println(writeErr)
|
||||
break
|
||||
} else {
|
||||
_ = rtpbuf[:i]
|
||||
}
|
||||
}
|
||||
}
|
||||
d <- struct{}{}
|
||||
}()
|
||||
<-d
|
||||
})
|
||||
peerConnection.OnICECandidate(func(i *webrtc.ICECandidate) {
|
||||
if i == nil {
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(ac.candidateFlag, func() (err error) {
|
||||
desc := peerConnection.RemoteDescription()
|
||||
if desc == nil {
|
||||
logger.Println("generated candidate appended to list : ", i)
|
||||
ac.pendingCandidates[target] = append(ac.pendingCandidates[target], i)
|
||||
} else {
|
||||
logger.Println("generated candidate : ", i)
|
||||
if iceCandidateErr := cb(from, target, i); iceCandidateErr != nil {
|
||||
logger.Println(iceCandidateErr)
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
})
|
||||
peerConnection.OnNegotiationNeeded(func() {
|
||||
logger.Println("---------------- rennego is needed -----------")
|
||||
// _ = atomicallyExecute(ac.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
// for _, id := range ac.CurrentMembersId {
|
||||
// logger.Println("----------------- sending renego to peer with id", id)
|
||||
// if _, ok := ac.rtcPeerConnections[id]; !ok {
|
||||
// continue
|
||||
// }
|
||||
// if peerConnection.SignalingState() == webrtc.SignalingStateStable {
|
||||
// localSd, localSdErr := peerConnection.CreateOffer(nil)
|
||||
// if localSdErr != nil {
|
||||
// logger.Println(localSdErr)
|
||||
// return localSdErr
|
||||
// }
|
||||
// if err = peerConnection.SetLocalDescription(localSd); err != nil {
|
||||
// logger.Println(err)
|
||||
// return
|
||||
// }
|
||||
// d, e := sendDCMessage(string(AUDIO_CHANNEL_WEBRTC_RENNEGOTIATION_OFFER), ac.ID, id, map[string]interface{}{
|
||||
// "from": ac.ID,
|
||||
// "to": id,
|
||||
// "sdp": localSd.SDP,
|
||||
// })
|
||||
// select {
|
||||
// case <-d:
|
||||
// case err = <-e:
|
||||
// logger.Println(err)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// return
|
||||
// })
|
||||
})
|
||||
return
|
||||
}
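The PLI loop inside OnTrack above keeps ticking even after the track reader has exited. A small sketch of the same loop with an explicit stop signal so it ends together with the read loop; stop is a hypothetical channel owned by the caller.

// sketch: periodically ask the sender for a key frame until stop is closed
func requestKeyframes(pc *webrtc.PeerConnection, ssrc webrtc.SSRC, stop <-chan struct{}) {
	ticker := time.NewTicker(1500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			if err := pc.WriteRTCP([]rtcp.Packet{&rtcp.PictureLossIndication{MediaSSRC: uint32(ssrc)}}); err != nil {
				logger.Println(err)
				return
			}
		}
	}
}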
|
||||
|
||||
func (ac *AudioChannel) HandleLeavingMember(id string) {
|
||||
if err := atomicallyExecute(ac.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := ac.rtcPeerConnections[id]; !ok {
|
||||
err = fmt.Errorf("no corresponding peerconnection for audio channel leaving member")
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
logger.Println(err)
|
||||
} else {
|
||||
defer func() {
|
||||
_ = atomicallyExecute(ac.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := ac.rtcPeerConnections[id]; ok {
|
||||
if closeErr := ac.rtcPeerConnections[id].Close(); closeErr != nil {
|
||||
err = closeErr
|
||||
logger.Println("peer connection close error", closeErr)
|
||||
}
|
||||
}
|
||||
delete(ac.rtcPeerConnections, id)
|
||||
return
|
||||
})
|
||||
}()
|
||||
}
|
||||
logger.Printf("peer %s is leaving the squad\n", id)
|
||||
_ = atomicallyExecute(ac.dataChannelMapFlag, func() (err error) {
|
||||
if _, ok := ac.audiChannelsDataChannels[id]; ok {
|
||||
ac.audiChannelsDataChannels[id].DataChannel.Close()
|
||||
}
|
||||
delete(ac.audiChannelsDataChannels, id)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(ac.localSDMapFlag, func() (err error) {
|
||||
delete(ac.localSD, id)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(ac.candidateFlag, func() (err error) {
|
||||
delete(ac.pendingCandidates, id)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(ac.audioSenderFlag, func() (err error) {
|
||||
for peerId, peerSender := range ac.audioTransceiver {
|
||||
if peerId != id {
|
||||
logger.Println("senders", peerSender)
|
||||
c := 0
|
||||
for i, sender := range peerSender {
|
||||
if sender.ID == id {
|
||||
if senderErr := sender.Transceiver.Sender().Stop(); senderErr != nil {
|
||||
logger.Println(senderErr)
|
||||
}
|
||||
if transceiverErr := sender.Transceiver.Stop(); transceiverErr != nil {
|
||||
logger.Println("transceiverErr occured with video", transceiverErr)
|
||||
}
|
||||
peerSender[len(peerSender)-i-1], peerSender[i] = peerSender[i], peerSender[len(peerSender)-i-1]
|
||||
c++
|
||||
}
|
||||
}
|
||||
ac.audioTransceiver[peerId] = ac.audioTransceiver[peerId][:len(peerSender)-(c)]
|
||||
logger.Println(ac.audioTransceiver[peerId])
|
||||
}
|
||||
}
|
||||
for _, transceiver := range ac.audioTransceiver[id] {
|
||||
if senderErr := transceiver.Transceiver.Sender().Stop(); senderErr != nil {
|
||||
logger.Println(senderErr)
|
||||
}
|
||||
if stopErr := transceiver.Transceiver.Stop(); stopErr != nil {
|
||||
logger.Println("transceiver audio stop error", stopErr)
|
||||
}
|
||||
}
|
||||
delete(ac.audioTransceiver, id)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(ac.remoteTracksFlag, func() (err error) {
|
||||
delete(ac.remoteTracks, id)
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
func (ac *AudioChannel) negotiate(target string, sendDCMessage SendDCMessageFunc) {
|
||||
logger.Println("------------------negotiate is called")
|
||||
_ = atomicallyExecute(ac.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := ac.rtcPeerConnections[target]; !ok {
|
||||
return
|
||||
}
|
||||
ac.rtcPeerConnections[target].makingOfferLock.Lock()
|
||||
ac.rtcPeerConnections[target].makingOffer = true
|
||||
ac.rtcPeerConnections[target].makingOfferLock.Unlock()
|
||||
defer func() {
|
||||
ac.rtcPeerConnections[target].makingOfferLock.Lock()
|
||||
ac.rtcPeerConnections[target].makingOffer = false
|
||||
ac.rtcPeerConnections[target].makingOfferLock.Unlock()
|
||||
}()
|
||||
|
||||
for _, id := range ac.CurrentMembersId {
|
||||
logger.Println("----------------- sending renego to peer with id", id)
|
||||
if _, ok := ac.rtcPeerConnections[id]; !ok {
|
||||
continue
|
||||
}
|
||||
connection := ac.rtcPeerConnections[id]
|
||||
if connection.SignalingState() == webrtc.SignalingStateStable {
|
||||
localSd, err := connection.CreateOffer(nil)
|
||||
if err != nil {
|
||||
logger.Println(err)
|
||||
return err
|
||||
}
|
||||
if err = connection.SetLocalDescription(localSd); err != nil {
|
||||
logger.Println(err)
|
||||
return err
|
||||
}
|
||||
d, e := sendDCMessage(string(AUDIO_CHANNEL_WEBRTC_RENNEGOTIATION_OFFER), ac.ID, id, map[string]interface{}{
|
||||
"from": ac.ID,
|
||||
"to": id,
|
||||
"sdp": localSd.SDP,
|
||||
})
|
||||
select {
|
||||
case <-d:
|
||||
case err = <-e:
|
||||
logger.Println(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
func (ac *AudioChannel) broadcastDatachannelMessage(from string, eventId string, payload map[string]interface{}) (done chan struct{}, errCh chan error) {
|
||||
done, errCh = make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
bs, jsonErr := json.Marshal(&ZoneResponse{
|
||||
Type: eventId,
|
||||
From: ac.ID,
|
||||
Payload: payload,
|
||||
})
|
||||
if jsonErr != nil {
|
||||
errCh <- jsonErr
|
||||
return
|
||||
}
|
||||
if err := atomicallyExecute(ac.dataChannelMapFlag, func() (err error) {
|
||||
for id, dc := range ac.audiChannelsDataChannels {
|
||||
if from != id {
|
||||
if err = dc.DataChannel.SendText(string(bs)); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
errCh <- err
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
return
|
||||
}
|
||||
|
||||
func (ac *AudioChannel) HandleDataChannelEvents(from string, eventId string, payload map[string]interface{}) (err error) {
|
||||
switch eventId {
|
||||
case AUDIO_CHANNEL_USER_MUTE:
|
||||
if err = atomicallyExecute(ac.remoteTracksFlag, func() (err error) {
|
||||
if _, ok := ac.remoteTracks[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding remote tracks entry for id %s", from)
|
||||
return
|
||||
}
|
||||
for _, track := range ac.remoteTracks[from] {
|
||||
atomic.SwapInt32(track.rdv, 1)
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
done, errCh := ac.broadcastDatachannelMessage(from, AUDIO_CHANNEL_USER_MUTE, map[string]interface{}{
|
||||
"userId": from,
|
||||
})
|
||||
select {
|
||||
case <-done:
|
||||
case err = <-errCh:
|
||||
}
|
||||
case AUDIO_CHANNEL_USER_UNMUTE:
|
||||
if err = atomicallyExecute(ac.remoteTracksFlag, func() (err error) {
|
||||
if _, ok := ac.remoteTracks[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding remote tracks entry for id %s", from)
|
||||
return
|
||||
}
|
||||
for _, track := range ac.remoteTracks[from] {
|
||||
atomic.SwapInt32(track.rdv, 0)
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
done, errCh := ac.broadcastDatachannelMessage(from, AUDIO_CHANNEL_USER_UNMUTE, map[string]interface{}{
|
||||
"userId": from,
|
||||
})
|
||||
select {
|
||||
case <-done:
|
||||
case err = <-errCh:
|
||||
}
|
||||
case AUDIO_CHANNEL_USER_SPEAKING:
|
||||
done, errCh := ac.broadcastDatachannelMessage(from, AUDIO_CHANNEL_USER_SPEAKING, map[string]interface{}{
|
||||
"userId": from,
|
||||
})
|
||||
select {
|
||||
case <-done:
|
||||
case err = <-errCh:
|
||||
}
|
||||
case AUDIO_CHANNEL_USER_STOPPED_SPEAKING:
|
||||
done, errCh := ac.broadcastDatachannelMessage(from, AUDIO_CHANNEL_USER_STOPPED_SPEAKING, map[string]interface{}{
|
||||
"userId": from,
|
||||
})
|
||||
select {
|
||||
case <-done:
|
||||
case err = <-errCh:
|
||||
}
|
||||
}
|
||||
return
|
||||
}
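The data-channel control messages handled above are JSON CallEvent envelopes; the struct itself is defined outside this excerpt, so the field names below (From, EventId, Payload) are inferred from the call sites. A client-side mute event would then look roughly like this:

// assumed shape of CallEvent, inferred from event.From / event.EventId / event.Payload above
evt := CallEvent{
	From:    "user-42",
	EventId: AUDIO_CHANNEL_USER_MUTE,
	Payload: map[string]interface{}{"userId": "user-42"},
}
bs, _ := json.Marshal(&evt)
// sending bs over the peer's data channel drives the AUDIO_CHANNEL_USER_MUTE branch above
_ = bs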
|
||||
1039
zoneAudioChannelshandler.go
Normal file
File diff suppressed because it is too large
179
zoneChatsDBHandler.go
Normal file
@ -0,0 +1,179 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/dgraph-io/badger/v3"
|
||||
)
|
||||
|
||||
type ChatFile struct {
|
||||
Path string `json:"path"`
|
||||
Name string `json:"name"`
|
||||
Size int `json:"size"`
|
||||
UploadTime string `json:"uploadTime"`
|
||||
}
|
||||
|
||||
type ChatMessage struct {
|
||||
ID uint64 `json:"id"`
|
||||
From string `json:"from"`
|
||||
ResponseOf *ChatMessage `json:"ResponseOf"`
|
||||
Tags []string `json:"tags"`
|
||||
Content string `json:"content"`
|
||||
Date string `json:"date"`
|
||||
File *ChatFile `json:"file"`
|
||||
}
|
||||
|
||||
type ZoneChatDBHandler struct {
|
||||
ChatID string
|
||||
ZoneID string
|
||||
PreviousId uint64
|
||||
db func(func(*badger.DB) (err error)) (err error)
|
||||
}
|
||||
|
||||
func NewZoneChatDBHandler(zoneId string, chatID string) (zoneChatDBHandler *ZoneChatDBHandler, err error) {
|
||||
zoneChatDBHandler = &ZoneChatDBHandler{
|
||||
db: func(f func(*badger.DB) (err error)) (err error) {
|
||||
db, err := badger.Open(badger.DefaultOptions(filepath.Join("data", "zones", zoneId, "chats", chatID)).WithLogger(dbLogger))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
err = f(db)
|
||||
return
|
||||
},
|
||||
ChatID: chatID,
|
||||
ZoneID: zoneId,
|
||||
}
|
||||
err = zoneChatDBHandler.db(func(d *badger.DB) (err error) {
|
||||
err = d.View(func(txn *badger.Txn) error {
|
||||
opt := badger.DefaultIteratorOptions
|
||||
it := txn.NewIterator(opt)
|
||||
defer it.Close()
|
||||
counter := 0
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
counter++
|
||||
}
|
||||
logger.Println(counter)
|
||||
zoneChatDBHandler.PreviousId = uint64(counter)
|
||||
return nil
|
||||
})
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneChatDBHandler) updateDbCallbackFolder(newChatId string) {
|
||||
zcdbh.db = func(f func(*badger.DB) (err error)) (err error) {
|
||||
db, err := badger.Open(badger.DefaultOptions(filepath.Join("data", "zones", zcdbh.ZoneID, "chats", newChatId)).WithLogger(dbLogger))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
err = f(db)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneChatDBHandler) AddNewChatMessage(chatMessage *ChatMessage) (err error) {
|
||||
b := make([]byte, 100)
|
||||
binary.LittleEndian.PutUint64(b, zcdbh.PreviousId+1)
|
||||
chatMessage.ID = zcdbh.PreviousId
|
||||
zcdbh.PreviousId++
|
||||
bs, err := json.Marshal(chatMessage)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = zcdbh.db(func(d *badger.DB) (err error) {
|
||||
err = d.Update(func(txn *badger.Txn) (err error) {
|
||||
if updateErr := txn.Set(b, bs); updateErr != nil {
|
||||
return updateErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
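AddNewChatMessage writes each message under a 100-byte key filled with a little-endian counter; badger orders keys lexicographically, so little-endian keys do not iterate in numeric order once the counter passes 255, and the padding is wasted. A hedged sketch of an 8-byte big-endian key helper that keeps numeric and lexicographic order aligned; it is not a drop-in change, since existing stores already hold keys in the old format.

// encode a message id as an 8-byte big-endian key so badger's byte-wise
// ordering matches the numeric ordering of the ids
func chatMessageKey(id uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, id)
	return b
}

// usage inside AddNewChatMessage (sketch):
//   key := chatMessageKey(zcdbh.PreviousId + 1)
//   err = txn.Set(key, bs)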
|
||||
|
||||
func (zcdbh *ZoneChatDBHandler) DeleteChatMessage(key uint64) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneChatDBHandler) ListChatMessages(lastIndex int, limit int) (chatMessages []*ChatMessage, l int, err error) {
|
||||
err = zcdbh.db(func(d *badger.DB) (err error) {
|
||||
err = d.View(func(txn *badger.Txn) (err error) {
|
||||
opt := badger.DefaultIteratorOptions
|
||||
opt.Reverse = true
|
||||
it := txn.NewIterator(opt)
|
||||
b := make([]byte, 100)
|
||||
if lastIndex <= 0 {
|
||||
binary.LittleEndian.PutUint64(b, uint64(zcdbh.PreviousId))
|
||||
} else {
|
||||
binary.LittleEndian.PutUint64(b, uint64(lastIndex))
|
||||
}
|
||||
x := 0
|
||||
defer it.Close()
|
||||
defer func() {
|
||||
if lastIndex > limit {
|
||||
l = lastIndex - limit - 1
|
||||
} else if lastIndex == 0 {
|
||||
if zcdbh.PreviousId > uint64(limit) {
|
||||
l = int(zcdbh.PreviousId) - limit - 1
|
||||
} else {
|
||||
l = 0
|
||||
}
|
||||
} else {
|
||||
l = 0
|
||||
}
|
||||
}()
|
||||
chatMessages = make([]*ChatMessage, 0)
|
||||
for it.Seek(b); it.Valid(); it.Next() {
|
||||
if x >= limit {
|
||||
break
|
||||
}
|
||||
item := it.Item()
|
||||
if err = item.Value(func(val []byte) (err error) {
|
||||
var chatMessage *ChatMessage
|
||||
if err = json.Unmarshal(val, &chatMessage); err != nil {
|
||||
return err
|
||||
}
|
||||
chatMessages = append(chatMessages, chatMessage)
|
||||
return
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
x++
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneChatDBHandler) GetChatMessage(index uint64) (chatMessage *ChatMessage, err error) {
|
||||
err = zcdbh.db(func(d *badger.DB) (err error) {
|
||||
err = d.View(func(txn *badger.Txn) (err error) {
|
||||
b := make([]byte, 100)
|
||||
binary.LittleEndian.PutUint64(b, uint64(zcdbh.PreviousId))
|
||||
item, err := txn.Get(b)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = item.Value(func(val []byte) error {
|
||||
return json.Unmarshal(val, &chatMessage)
|
||||
})
|
||||
return
|
||||
})
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneChatDBHandler) ModifyChatMessage(id string, chatMessage *ChatMessage) (err error) {
|
||||
return
|
||||
}
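A short usage sketch for the chat handler above: open (or create) the per-chat store, append a message and page through the history. The zone and chat ids and the message contents are made-up values.

func exampleChatUsage() {
	handler, err := NewZoneChatDBHandler("zone-1", "chat-general")
	if err != nil {
		logger.Println(err)
		return
	}
	msg := &ChatMessage{
		From:    "user-42",
		Content: "hello",
		Date:    "2024-01-01T00:00:00Z",
	}
	if err := handler.AddNewChatMessage(msg); err != nil {
		logger.Println(err)
		return
	}
	// last page of up to 50 messages; the second return value is the index to pass
	// as lastIndex on the next call
	messages, next, err := handler.ListChatMessages(0, 50)
	if err != nil {
		logger.Println(err)
		return
	}
	logger.Println(len(messages), next)
}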
|
||||
1015
zoneChatsHandler.go
Normal file
File diff suppressed because it is too large
169
zoneFSDBhandler.go
Normal file
@ -0,0 +1,169 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/dgraph-io/badger/v3"
|
||||
)
|
||||
|
||||
type ZoneFilesDBHandler struct {
|
||||
CurrentLocation string
|
||||
ZoneID string
|
||||
rootPath string
|
||||
db func(string, func(*badger.DB) (err error)) (err error)
|
||||
}
|
||||
|
||||
func NewZoneFilesDBHandler(zoneId string) (zoneFilesDBHandler *ZoneFilesDBHandler, err error) {
|
||||
zoneFilesDBHandler = &ZoneFilesDBHandler{
|
||||
db: func(path string, f func(*badger.DB) (err error)) (err error) {
|
||||
root := filepath.Join("data", "zones", zoneId, "fs", path)
|
||||
db, err := badger.Open(badger.DefaultOptions(root).WithLogger(dbLogger))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
err = f(db)
|
||||
return
|
||||
},
|
||||
ZoneID: zoneId,
|
||||
}
|
||||
zoneFilesDBHandler.rootPath = filepath.Join("data", "zones", zoneId, "fs")
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneFilesDBHandler) AddNewFSEntity(path string, fsEntity *ZoneFSEntity) (err error) {
|
||||
bs, err := json.Marshal(fsEntity)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = zcdbh.db(path, func(d *badger.DB) (err error) {
|
||||
err = d.Update(func(txn *badger.Txn) (err error) {
|
||||
if updateErr := txn.Set([]byte(fsEntity.Name), bs); updateErr != nil {
|
||||
return updateErr
|
||||
}
|
||||
if fsEntity.Folder {
|
||||
err = os.Mkdir(filepath.Join("data", "zones", zcdbh.ZoneID, "fs", path, fsEntity.Name), 0700)
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneFilesDBHandler) DeleteFolder(path, name string) (err error) {
|
||||
if err = zcdbh.db(path, func(d *badger.DB) (err error) {
|
||||
err = d.Update(func(txn *badger.Txn) (err error) {
|
||||
err = txn.Delete([]byte(name))
|
||||
return
|
||||
})
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
err = os.RemoveAll(filepath.Join(zcdbh.rootPath, path, name))
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneFilesDBHandler) DeleteFile(path, name string) (err error) {
|
||||
if err = zcdbh.db(path, func(d *badger.DB) (err error) {
|
||||
err = d.Update(func(txn *badger.Txn) (err error) {
|
||||
err = txn.Delete([]byte(name))
|
||||
return
|
||||
})
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
err = os.Remove(filepath.Join(zcdbh.rootPath, path, "__files__", name))
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneFilesDBHandler) ListZoneFSEntity(path string, userId string, lastIndex int, limit int) (fsEntities []*ZoneFSEntity, l int, err error) {
|
||||
err = zcdbh.db(path, func(d *badger.DB) (err error) {
|
||||
err = d.View(func(txn *badger.Txn) (err error) {
|
||||
opt := badger.DefaultIteratorOptions
|
||||
opt.Reverse = true
|
||||
it := txn.NewIterator(opt)
|
||||
defer it.Close()
|
||||
fsEntities = make([]*ZoneFSEntity, 0)
|
||||
// filepath.SplitList splits on the OS path *list* separator (':' or ';'), not on '/',
// so it cannot be used to count path elements; split the slash-normalised path instead
splittedPath := strings.Split(filepath.ToSlash(path), "/")
var parent *ZoneFSEntity
if len(splittedPath) >= 2 {
parent, err = zcdbh.GetFSEntity(filepath.Dir(path), filepath.Base(path))
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
item := it.Item()
|
||||
if err = item.Value(func(val []byte) (err error) {
|
||||
var fsEntity *ZoneFSEntity
|
||||
if err = json.Unmarshal(val, &fsEntity); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Println("------------------", fsEntity.Name)
|
||||
if _, ok := fsEntity.Members[userId]; ok || fsEntity.Type == "public" {
|
||||
fsEntities = append(fsEntities, fsEntity)
|
||||
} else if parent != nil {
|
||||
if _, ok := parent.Members[userId]; ok && fsEntity.Type != "private" {
|
||||
if parent.Members[userId].Read {
|
||||
fsEntities = append(fsEntities, fsEntity)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneFilesDBHandler) GetFSEntity(path string, folderName string) (folder *ZoneFSEntity, err error) {
    err = zcdbh.db(path, func(d *badger.DB) (err error) {
        err = d.View(func(txn *badger.Txn) (err error) {
            item, err := txn.Get([]byte(folderName))
            if err != nil {
                return
            }
            err = item.Value(func(val []byte) (err error) {
                err = json.Unmarshal(val, &folder)
                return
            })
            return
        })
        return
    })
    return
}
func (zcdbh *ZoneFilesDBHandler) SetFSEntity(path string, oldName string, fsEntity *ZoneFSEntity) (err error) {
    bs, err := json.Marshal(fsEntity)
    if err != nil {
        return
    }
    if err = zcdbh.db(path, func(d *badger.DB) (err error) {
        err = d.Update(func(txn *badger.Txn) (err error) {
            // Log a failed delete of the old key but keep going; the Set below
            // still stores the entity under its (possibly new) name.
            if dErr := txn.Delete([]byte(oldName)); dErr != nil {
                logger.Println("error from here:", dErr)
            }
            if updateErr := txn.Set([]byte(fsEntity.Name), bs); updateErr != nil {
                return updateErr
            }
            return
        })
        return
    }); err != nil {
        return
    }
    return
}
1114
zoneFSHandler.go
Normal file
File diff suppressed because it is too large
559
zoneFSInstance.go
Normal file
@ -0,0 +1,559 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
sync "sync"
|
||||
"time"
|
||||
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
ZONE_FS_WEBRTC_OFFER ReqType = "zone_fs_offer"
|
||||
ZONE_FS_WEBRTC_ANSWER ReqType = "zone_fs_answer"
|
||||
ZONE_FS_WEBRTC_RENNEGOTIATION_OFFER ReqType = "zone_fs_rennegotiation_offer"
|
||||
ZONE_FS_WEBRTC_RENNEGOTIATION_ANSWER ReqType = "zone_fs_rennegotiation_answer"
|
||||
ZONE_FS_WEBRTC_COUNTER_OFFER ReqType = "zone_fs_webrtc_counter_offer"
|
||||
ZONE_FS_WEBRTC_CANDIDATE ReqType = "zone_fs_webrtc_candidate"
|
||||
)
|
||||
|
||||
const ()
|
||||
|
||||
type FSInstance struct {
|
||||
ZoneID string `json:"id"`
|
||||
Owner string `json:"owner"`
|
||||
Members []string `json:"members"`
|
||||
OpenFiles map[string]*os.File `json:"-"`
|
||||
localSD map[string]*webrtc.SessionDescription `json:"-"`
|
||||
rtcPeerConnections map[string]*ZoneRTCPeerConnection `json:"-"`
|
||||
zoneFSDataChannels map[string]map[string]*DataChannel `json:"-"`
|
||||
pendingCandidates map[string][]*webrtc.ICECandidate `json:"-"`
|
||||
middlewares []interface{} `json:"-"`
|
||||
candidateFlag *uint32 `json:"-"`
|
||||
filesFlag *uint32 `json:"-"`
|
||||
rtcPeerConnectionMapFlag *uint32 `json:"-"`
|
||||
dataChannelMapFlag *uint32 `json:"-"`
|
||||
localSDMapFlag *uint32 `json:"-"`
|
||||
}
|
||||
|
||||
type FSInstanceOnICECandidateFunc = func(string, string, *webrtc.ICECandidate) error
|
||||
|
||||
func NewFSInstance(id, owner string, members []string) (fsInstance *FSInstance) {
candidateFlag := uint32(0)
rtcPeerConnectionMapFlag := uint32(0)
dataChannelMapFlag := uint32(0)
filesFlag := uint32(0)
localSDMapFlag := uint32(0)
fsInstance = &FSInstance{
ZoneID: id,
Owner: owner,
Members: members,
OpenFiles: make(map[string]*os.File),
localSD: make(map[string]*webrtc.SessionDescription),
rtcPeerConnections: make(map[string]*ZoneRTCPeerConnection),
zoneFSDataChannels: make(map[string]map[string]*DataChannel),
pendingCandidates: make(map[string][]*webrtc.ICECandidate),
middlewares: make([]interface{}, 0),
candidateFlag: &candidateFlag,
filesFlag: &filesFlag,
rtcPeerConnectionMapFlag: &rtcPeerConnectionMapFlag,
dataChannelMapFlag: &dataChannelMapFlag,
localSDMapFlag: &localSDMapFlag,
}
return
}
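// Note on the *uint32 flag fields used above: each flag guards one of the
// instance maps (OpenFiles, localSD, rtcPeerConnections, zoneFSDataChannels,
// pendingCandidates) through atomicallyExecute, which spin-waits on a
// compare-and-swap of the flag. Every access to one of those maps elsewhere in
// this file is wrapped the same way, for example (user id is illustrative):
//
//    _ = atomicallyExecute(fs.localSDMapFlag, func() error {
//        delete(fs.localSD, "some-user-id")
//        return nil
//    })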
|
||||
|
||||
func (fs *FSInstance) SetupFileUpload(path, filename, userId string) (err error) {
|
||||
concretePath := filepath.Join("data", "zones", fs.ZoneID, "fs", path, "__files__", filename)
|
||||
if _, rErr := os.ReadDir(filepath.Join("data", "zones", fs.ZoneID, "fs", path, "__files__")); os.IsNotExist(rErr) {
|
||||
if err = os.MkdirAll(filepath.Join("data", "zones", fs.ZoneID, "fs", path, "__files__"), 0700); err != nil {
|
||||
return
|
||||
}
|
||||
} else if rErr != nil {
|
||||
return rErr
|
||||
}
|
||||
file, err := os.OpenFile(concretePath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(fs.filesFlag, func() (err error) {
|
||||
fs.OpenFiles[filename] = file
|
||||
return
|
||||
})
|
||||
err = atomicallyExecute(fs.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if pc, ok := fs.rtcPeerConnections[userId]; ok {
|
||||
maxRetransmits := uint16(100)
|
||||
var dc *webrtc.DataChannel
|
||||
dc, err = pc.CreateDataChannel(filename, &webrtc.DataChannelInit{
|
||||
MaxRetransmits: &maxRetransmits,
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
dc.OnOpen(func() {
|
||||
logger.Println("!-----------------------------!")
|
||||
logger.Printf("datachannel with id %s is now open\n", dc.Label())
|
||||
logger.Println("!-----------------------------!")
|
||||
})
|
||||
dc.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
_, _ = file.Write(msg.Data)
|
||||
})
|
||||
dc.OnClose(func() {
|
||||
_ = atomicallyExecute(fs.filesFlag, func() (err error) {
|
||||
if f, ok := fs.OpenFiles[filename]; ok {
|
||||
err = f.Close()
|
||||
}
|
||||
delete(fs.OpenFiles, filename)
|
||||
return
|
||||
})
|
||||
})
|
||||
err = atomicallyExecute(fs.dataChannelMapFlag, func() (err error) {
|
||||
if _, ok := fs.zoneFSDataChannels[userId]; !ok {
|
||||
err = fmt.Errorf("no corresponding map entry in zoneFSDataChannels for id %s", userId)
|
||||
return
|
||||
}
|
||||
l := int32(0)
|
||||
fs.zoneFSDataChannels[userId][dc.Label()] = &DataChannel{
|
||||
DataChannel: dc,
|
||||
l: &l,
|
||||
}
|
||||
return
|
||||
})
|
||||
} else {
|
||||
err = fmt.Errorf("no peerconnection for id %s", userId)
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
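// Upload flow implemented by SetupFileUpload above: the host creates a WebRTC
// data channel labelled with the file name, appends every binary message it
// receives to data/zones/<zoneId>/fs/<path>/__files__/<filename>, and closes the
// file when the channel closes. A minimal sketch of the sending side, assuming a
// connected *webrtc.PeerConnection on the uploader (names are illustrative; the
// real client is not part of this file):
//
//    peerConnection.OnDataChannel(func(dc *webrtc.DataChannel) {
//        if dc.Label() != "report.pdf" { // hypothetical file name
//            return
//        }
//        dc.OnOpen(func() {
//            for _, chunk := range chunks { // [][]byte read from the local file
//                _ = dc.Send(chunk)
//            }
//            _ = dc.Close()
//        })
//    })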
|
||||
|
||||
func (fs *FSInstance) SetupFileDownload(path, filename, userId string) (err error) {
|
||||
concretePath := filepath.Join("data", "zones", fs.ZoneID, "fs", path, "__files__", filename)
|
||||
file, err := os.OpenFile(concretePath, os.O_RDONLY, 0755)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(fs.filesFlag, func() (err error) {
|
||||
fs.OpenFiles[filename] = file
|
||||
return
|
||||
})
|
||||
err = atomicallyExecute(fs.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if pc, ok := fs.rtcPeerConnections[userId]; ok {
|
||||
maxRetransmits := uint16(100)
|
||||
var dc *webrtc.DataChannel
|
||||
dc, err = pc.CreateDataChannel(filename, &webrtc.DataChannelInit{
|
||||
MaxRetransmits: &maxRetransmits,
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
dc.SetBufferedAmountLowThreshold(16000000)
|
||||
bufferedAmountLock := make(chan struct{})
|
||||
done := make(chan struct{})
|
||||
dc.OnOpen(func() {
|
||||
go func() {
|
||||
defer func() {
|
||||
bufferedAmountLock = nil
|
||||
}()
|
||||
r := bufio.NewReader(file)
|
||||
buf := make([]byte, 0, 60000)
|
||||
for {
n, readErr := r.Read(buf[:cap(buf)])
buf = buf[:n]
if n == 0 {
if readErr == nil {
logger.Println("n is 0 weird")
break
}
if readErr == io.EOF {
break
}
logger.Println(readErr)
return
}
if err = dc.Send(buf); err != nil {
logger.Println(err)
}
if dc.BufferedAmount() > dc.BufferedAmountLowThreshold() {
<-bufferedAmountLock
}
}
|
||||
logger.Println("done")
|
||||
_ = dc.SendText("done")
|
||||
<-time.After(time.Second * 5)
|
||||
_ = dc.Close()
|
||||
}()
|
||||
})
|
||||
dc.OnBufferedAmountLow(func() {
|
||||
bufferedAmountLock <- struct{}{}
|
||||
})
|
||||
dc.OnClose(func() {
|
||||
done <- struct{}{}
|
||||
defer close(done)
|
||||
_ = atomicallyExecute(fs.filesFlag, func() (err error) {
|
||||
if f, ok := fs.OpenFiles[filename]; ok {
|
||||
err = f.Close()
|
||||
}
|
||||
delete(fs.OpenFiles, filename)
|
||||
return
|
||||
})
|
||||
|
||||
})
|
||||
} else {
|
||||
err = fmt.Errorf("no peerconnection for id %s", userId)
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
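// Download flow implemented by SetupFileDownload above: the file is streamed in
// ~60 KB chunks over a data channel labelled with the file name, a final "done"
// text message is sent, and the channel is closed. Flow control uses the pion
// buffered-amount mechanism: when BufferedAmount exceeds the 16 MB
// BufferedAmountLowThreshold the send loop blocks on bufferedAmountLock until
// OnBufferedAmountLow fires. The same backpressure pattern in isolation:
//
//    dc.SetBufferedAmountLowThreshold(1 << 20)
//    resume := make(chan struct{})
//    dc.OnBufferedAmountLow(func() { resume <- struct{}{} })
//    // inside the send loop:
//    //    if dc.BufferedAmount() > dc.BufferedAmountLowThreshold() { <-resume }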
|
||||
|
||||
func (fs *FSInstance) HandleOffer(ctx context.Context, channelId, userId, sdp, hostId string, sendDCMessage SendDCMessageFunc, cb FSInstanceOnICECandidateFunc) (done chan struct{}, errCh chan error) {
|
||||
done, errCh = make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
peerConnection, err := fs.createPeerConnection(userId, fs.ZoneID, webrtc.SDPTypeAnswer, cb, sendDCMessage)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(fs.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
fs.rtcPeerConnections[userId] = &ZoneRTCPeerConnection{
|
||||
PeerConnection: peerConnection,
|
||||
makingOffer: false,
|
||||
makingOfferLock: &sync.Mutex{},
|
||||
}
|
||||
return
|
||||
})
|
||||
offer := webrtc.SessionDescription{
|
||||
Type: webrtc.SDPTypeOffer,
|
||||
SDP: sdp,
|
||||
}
|
||||
if err = peerConnection.SetRemoteDescription(offer); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
rawAnswer, err := peerConnection.CreateAnswer(nil)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(fs.localSDMapFlag, func() (err error) {
|
||||
fs.localSD[userId] = &rawAnswer
|
||||
return
|
||||
})
|
||||
_, _ = sendDCMessage(string(ZONE_FS_WEBRTC_ANSWER), hostId, userId, map[string]interface{}{
|
||||
"to": userId,
|
||||
"from": fs.ZoneID,
|
||||
"channelId": channelId,
|
||||
"sdp": rawAnswer.SDP,
|
||||
})
|
||||
done <- struct{}{}
|
||||
logger.Println("handle offer done")
|
||||
}()
|
||||
return
|
||||
}
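// Signalling flow for this instance: HandleOffer above builds the answering
// peer connection, creates the answer, stores it in fs.localSD and sends it
// back as ZONE_FS_WEBRTC_ANSWER. The local description is only applied once the
// remote side confirms (HandleCounterOffer below, presumably driven by
// ZONE_FS_WEBRTC_COUNTER_OFFER in the suppressed zoneFSHandler.go), at which
// point any ICE candidates queued in fs.pendingCandidates are flushed as
// ZONE_FS_WEBRTC_CANDIDATE messages.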
|
||||
|
||||
func (fs *FSInstance) HandleCounterOffer(ctx context.Context, userId string, sendDCMessage SendDCMessageFunc) (err error) {
|
||||
if err = atomicallyExecute(fs.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := fs.rtcPeerConnections[userId]; !ok {
|
||||
err = fmt.Errorf("no field corresponding peer connection for id %s", userId)
|
||||
return
|
||||
}
|
||||
logger.Println("handling counter offer")
|
||||
connection := fs.rtcPeerConnections[userId]
|
||||
err = atomicallyExecute(fs.localSDMapFlag, func() (err error) {
|
||||
err = connection.SetLocalDescription(*fs.localSD[userId])
|
||||
return
|
||||
})
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(fs.localSDMapFlag, func() (err error) {
|
||||
delete(fs.localSD, userId)
|
||||
return
|
||||
})
|
||||
if err = atomicallyExecute(fs.candidateFlag, func() (err error) {
|
||||
for _, candidate := range fs.pendingCandidates[userId] {
|
||||
logger.Println("sending candidate to", userId, candidate)
|
||||
d, e := sendDCMessage(string(ZONE_FS_WEBRTC_CANDIDATE), "", userId, map[string]interface{}{
|
||||
"from": fs.ZoneID,
|
||||
"to": userId,
|
||||
"candidate": candidate.ToJSON().Candidate,
|
||||
"sdpMid": *candidate.ToJSON().SDPMid,
|
||||
"sdpMlineIndex": strconv.Itoa(int(*candidate.ToJSON().SDPMLineIndex)),
|
||||
})
|
||||
select {
|
||||
case <-d:
|
||||
case err = <-e:
|
||||
return
|
||||
}
|
||||
}
|
||||
delete(fs.pendingCandidates, userId)
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (fs *FSInstance) HandleRennegotiationOffer(from, sdp string, sendDCMessage SendDCMessageFunc) (err error) {
|
||||
err = atomicallyExecute(fs.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := fs.rtcPeerConnections[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding peer connection for id %s", from)
|
||||
return
|
||||
}
|
||||
fs.rtcPeerConnections[from].makingOfferLock.Lock()
|
||||
if fs.rtcPeerConnections[from].makingOffer {
|
||||
fs.rtcPeerConnections[from].makingOfferLock.Unlock()
|
||||
return fmt.Errorf("already making an offer or state is stable")
|
||||
}
|
||||
fs.rtcPeerConnections[from].makingOfferLock.Unlock()
|
||||
if err = fs.rtcPeerConnections[from].SetRemoteDescription(webrtc.SessionDescription{SDP: sdp, Type: webrtc.SDPTypeOffer}); err != nil {
|
||||
return
|
||||
}
|
||||
localSd, err := fs.rtcPeerConnections[from].CreateAnswer(nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = fs.rtcPeerConnections[from].SetLocalDescription(localSd); err != nil {
|
||||
return
|
||||
}
|
||||
d, e := sendDCMessage(string(ZONE_FS_WEBRTC_RENNEGOTIATION_ANSWER), fs.ZoneID, from, map[string]interface{}{
|
||||
"from": fs.ZoneID,
|
||||
"to": from,
|
||||
"sdp": localSd.SDP,
|
||||
})
|
||||
select {
|
||||
case <-d:
|
||||
case err = <-e:
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (fs *FSInstance) HandleRennegotiationAnswer(from, sdp string) (err error) {
|
||||
logger.Println("---------------------handling rennego answer")
|
||||
err = atomicallyExecute(fs.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
err = fs.rtcPeerConnections[from].SetRemoteDescription(webrtc.SessionDescription{SDP: sdp, Type: webrtc.SDPTypeAnswer})
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (fs *FSInstance) AddCandidate(candidate *webrtc.ICECandidateInit, from string) (err error) {
|
||||
logger.Println("adding ice candidate", candidate)
|
||||
err = atomicallyExecute(fs.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := fs.rtcPeerConnections[from]; ok && candidate != nil {
|
||||
err = fs.rtcPeerConnections[from].AddICECandidate(*candidate)
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (fs *FSInstance) createPeerConnection(target, from string, peerType webrtc.SDPType, cb FSInstanceOnICECandidateFunc, sendDCMessage SendDCMessageFunc) (peerConnection *webrtc.PeerConnection, err error) {
|
||||
defer func() {
if r := recover(); r != nil {
logger.Printf("recover from panic : %v\n", r)
}
}()
|
||||
config := webrtc.Configuration{
|
||||
ICEServers: []webrtc.ICEServer{
|
||||
{
|
||||
URLs: []string{"stun:stun.l.google.com:19302", "stun:stunserver.org:3478"},
|
||||
},
|
||||
},
|
||||
SDPSemantics: webrtc.SDPSemanticsUnifiedPlanWithFallback,
|
||||
}
|
||||
|
||||
peerConnection, err = webrtc.NewPeerConnection(config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
logger.Println("---------------------------------------------------")
|
||||
if peerType == webrtc.SDPTypeAnswer {
|
||||
maxRetransmits := uint16(100)
|
||||
channel, err := peerConnection.CreateDataChannel("data", &webrtc.DataChannelInit{
|
||||
MaxRetransmits: &maxRetransmits,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
channel.OnOpen(func() {
|
||||
logger.Println("channel opened")
|
||||
if chanErr := channel.SendText("yooo man this is open"); chanErr != nil {
|
||||
logger.Println(chanErr)
|
||||
}
|
||||
})
|
||||
channel.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
var event CallEvent
|
||||
if err := json.Unmarshal(msg.Data, &event); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if e := fs.HandleDataChannelEvents(event.From, event.EventId, event.Payload); e != nil {
|
||||
logger.Println("*-------------- datachannel error: ", e)
|
||||
}
|
||||
})
|
||||
logger.Println("new channel for target : ", target)
|
||||
channel.SetBufferedAmountLowThreshold(bufferedAmountLowThreshold)
|
||||
channel.OnBufferedAmountLow(func() {
|
||||
|
||||
})
|
||||
_ = atomicallyExecute(fs.dataChannelMapFlag, func() (err error) {
|
||||
logger.Println(target)
|
||||
l := int32(0)
|
||||
if _, ok := fs.zoneFSDataChannels[target]; !ok {
|
||||
fs.zoneFSDataChannels[target] = make(map[string]*DataChannel)
|
||||
}
|
||||
fs.zoneFSDataChannels[target][channel.Label()] = &DataChannel{
|
||||
DataChannel: channel,
|
||||
l: &l,
|
||||
}
|
||||
return
|
||||
})
|
||||
} else {
|
||||
peerConnection.OnDataChannel(func(dc *webrtc.DataChannel) {
|
||||
_ = atomicallyExecute(fs.dataChannelMapFlag, func() (err error) {
|
||||
l := int32(0)
|
||||
if _, ok := fs.zoneFSDataChannels[target]; !ok {
|
||||
fs.zoneFSDataChannels[target] = make(map[string]*DataChannel)
|
||||
}
|
||||
fs.zoneFSDataChannels[target][dc.Label()] = &DataChannel{
|
||||
DataChannel: dc,
|
||||
l: &l,
|
||||
}
|
||||
return
|
||||
})
|
||||
dc.OnOpen(func() {
|
||||
logger.Printf("got a new open datachannel %s\n", dc.Label())
|
||||
})
|
||||
dc.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
var event CallEvent
|
||||
if err := json.Unmarshal(msg.Data, &event); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if e := fs.HandleDataChannelEvents(event.From, event.EventId, event.Payload); e != nil {
|
||||
logger.Println("*-------------- datachannel error: ", e)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
peerConnection.OnConnectionStateChange(func(pcs webrtc.PeerConnectionState) {
|
||||
if pcs == webrtc.PeerConnectionStateClosed || pcs == webrtc.PeerConnectionStateDisconnected || pcs == webrtc.PeerConnectionStateFailed {
|
||||
logger.Println(pcs)
|
||||
fs.HandleLeavingMember(target)
|
||||
}
|
||||
})
|
||||
peerConnection.OnICEConnectionStateChange(func(is webrtc.ICEConnectionState) {
|
||||
logger.Printf("ICE connection state has changed %s\n", is.String())
|
||||
if is == webrtc.ICEConnectionStateDisconnected || is == webrtc.ICEConnectionStateFailed {
|
||||
logger.Println(is)
|
||||
}
|
||||
})
|
||||
peerConnection.OnICECandidate(func(i *webrtc.ICECandidate) {
|
||||
if i == nil {
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(fs.candidateFlag, func() (err error) {
|
||||
desc := peerConnection.RemoteDescription()
|
||||
if desc == nil {
|
||||
logger.Println("generated candidate appended to list : ", i)
|
||||
fs.pendingCandidates[target] = append(fs.pendingCandidates[target], i)
|
||||
} else {
|
||||
logger.Println("generated candidate : ", i)
|
||||
if iceCandidateErr := cb(from, target, i); iceCandidateErr != nil {
|
||||
logger.Println(iceCandidateErr)
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
})
|
||||
peerConnection.OnNegotiationNeeded(func() {
|
||||
logger.Println("---------------- rennego is needed -----------")
|
||||
_ = atomicallyExecute(fs.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
logger.Println("----------------- sending renego to peer with id", target)
|
||||
if peerConnection.SignalingState() == webrtc.SignalingStateStable {
|
||||
localSd, localSdErr := peerConnection.CreateOffer(nil)
|
||||
if localSdErr != nil {
|
||||
logger.Println(localSdErr)
|
||||
return localSdErr
|
||||
}
|
||||
if err = peerConnection.SetLocalDescription(localSd); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
d, e := sendDCMessage(string(ZONE_FS_WEBRTC_RENNEGOTIATION_OFFER), fs.ZoneID, target, map[string]interface{}{
|
||||
"from": fs.ZoneID,
|
||||
"to": target,
|
||||
"sdp": localSd.SDP,
|
||||
})
|
||||
select {
|
||||
case <-d:
|
||||
case err = <-e:
|
||||
logger.Println(err)
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (fs *FSInstance) HandleLeavingMember(id string) {
|
||||
if err := atomicallyExecute(fs.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := fs.rtcPeerConnections[id]; !ok {
|
||||
err = fmt.Errorf("no corresponding peerconnection for fs instance leaving member")
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
logger.Println(err)
|
||||
} else {
|
||||
defer func() {
|
||||
_ = atomicallyExecute(fs.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := fs.rtcPeerConnections[id]; ok {
|
||||
if closeErr := fs.rtcPeerConnections[id].Close(); closeErr != nil {
|
||||
err = closeErr
|
||||
logger.Println("peer connection close error", closeErr)
|
||||
}
|
||||
}
|
||||
delete(fs.rtcPeerConnections, id)
|
||||
return
|
||||
})
|
||||
}()
|
||||
}
|
||||
logger.Printf("peer %s is leaving the squad\n", id)
|
||||
_ = atomicallyExecute(fs.dataChannelMapFlag, func() (err error) {
|
||||
if _, ok := fs.zoneFSDataChannels[id]; ok {
|
||||
for _, dc := range fs.zoneFSDataChannels[id] {
|
||||
dc.DataChannel.Close()
|
||||
}
|
||||
}
|
||||
delete(fs.zoneFSDataChannels, id)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(fs.localSDMapFlag, func() (err error) {
|
||||
delete(fs.localSD, id)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(fs.candidateFlag, func() (err error) {
|
||||
delete(fs.pendingCandidates, id)
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
func (fs *FSInstance) HandleDataChannelEvents(from, eventId string, payload map[string]interface{}) (err error) {
|
||||
switch eventId {
|
||||
}
|
||||
return
|
||||
}
|
||||
1
zoneFile.go
Normal file
@ -0,0 +1 @@
package localserver
234
zoneGrpcMiddleware.go
Normal file
@ -0,0 +1,234 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
ZONE_OFFER ReqType = "zone_offer"
|
||||
ZONE_ANSWER ReqType = "zone_answer"
|
||||
ZONE_COUNTER_OFFER ReqType = "zone_counter_offer"
|
||||
JOIN_ZONE ReqType = "join_hosted_squad"
|
||||
ZONE_ACCESS_DENIED ReqType = "zone_squad_access_denied"
|
||||
QUIT_ZONE ReqType = "zone_stop_call"
|
||||
ZONE_ACCESS_GRANTED ReqType = "zone_access_granted"
|
||||
INCOMING_ZONE_MEMBER ReqType = "incoming_zone_member"
|
||||
LEAVING_ZONE_MEMBER ReqType = "leaving_zone_member"
|
||||
ZONE_WEBRTC_RENNEGOTIATION_OFFER ReqType = "zone_rennegotiation_offer"
|
||||
ZONE_WEBRTC_RENNEGOTIATION_ANSWER ReqType = "zone_rennegotiation_answer"
|
||||
ZONE_WEBRTC_CANDIDATE ReqType = "zone_webrtc_candidate"
|
||||
NEW_ZONE ReqType = "new_zone"
|
||||
NEW_AUTHORIZED_ZONE_MEMBER ReqType = "new_authorized_zone_member"
|
||||
REMOVED_ZONE_AUTHORIZED_MEMBER ReqType = "removed_zone_authorized_member"
|
||||
)
|
||||
|
||||
type ZoneGrpcMiddleware struct {
|
||||
Manager *ZoneManager
|
||||
stream GrpcManager_LinkClient
|
||||
}
|
||||
|
||||
func NewZoneGrpcMiddleware(manager *ZoneManager) (zoneGrpcMiddleware *ZoneGrpcMiddleware) {
|
||||
zoneGrpcMiddleware = &ZoneGrpcMiddleware{
|
||||
Manager: manager,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (zm *ZoneGrpcMiddleware) signalCandidate(to string, candidate *webrtc.ICECandidate) (err error) {
|
||||
err = zm.stream.Send(&Request{
|
||||
Type: string(ZONE_WEBRTC_CANDIDATE),
|
||||
From: zm.Manager.ID,
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"from": zm.Manager.ID,
|
||||
"to": to,
|
||||
"candidate": candidate.ToJSON().Candidate,
|
||||
"sdpMid": *candidate.ToJSON().SDPMid,
|
||||
"sdpMlineIndex": strconv.Itoa(int(*candidate.ToJSON().SDPMLineIndex)),
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zm *ZoneGrpcMiddleware) Process(ctx context.Context, req *Response, stream GrpcManager_LinkClient) (err error) {
|
||||
done, errCh := make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
switch req.Type {
|
||||
case string(INCOMING_ZONE_MEMBER):
|
||||
case string(LEAVING_ZONE_MEMBER):
|
||||
if err := validateRequest(req.GetPayload(), "zoneId", "userId"); err != nil {
errCh <- err
return
}
|
||||
if err := zm.Manager.HandleLeavingMember(req.Payload["userId"], req.Payload["zoneId"]); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(REMOVED_ZONE_AUTHORIZED_MEMBER):
|
||||
if err := validateRequest(req.GetPayload(), "zoneId", "userId"); err != nil {
errCh <- err
return
}
|
||||
if err = atomicallyExecute(zm.Manager.zoneFlag, func() (err error) {
|
||||
reqChan := make(chan *ZoneRequest)
|
||||
done, e := zm.Manager.Zones[req.Payload["zoneId"]].ZoneRequestScheduler.Schedule(reqChan)
|
||||
go func() {
|
||||
defer close(reqChan)
|
||||
reqChan <- &ZoneRequest{
|
||||
ReqType: string(REMOVE_USER),
|
||||
From: req.Payload["userId"],
|
||||
Payload: map[string]interface{}{
|
||||
"userId": req.Payload["userId"],
|
||||
},
|
||||
}
|
||||
reqChan <- &ZoneRequest{
|
||||
ReqType: string(REMOVED_ZONE_AUTHORIZED_MEMBER),
|
||||
From: req.Payload["userId"],
|
||||
Payload: map[string]interface{}{
|
||||
"userId": req.Payload["userId"],
|
||||
},
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
case err = <-e:
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(NEW_AUTHORIZED_ZONE_MEMBER):
|
||||
if err := validateRequest(req.GetPayload(), "zoneId", "userId"); err != nil {
errCh <- err
return
}
|
||||
if err = atomicallyExecute(zm.Manager.zoneFlag, func() (err error) {
|
||||
reqChan := make(chan *ZoneRequest)
|
||||
done, e := zm.Manager.Zones[req.Payload["zoneId"]].ZoneRequestScheduler.Schedule(reqChan)
|
||||
go func() {
|
||||
defer close(reqChan)
|
||||
reqChan <- &ZoneRequest{
|
||||
ReqType: string(ADD_USER),
|
||||
From: req.Payload["userId"],
|
||||
Payload: map[string]interface{}{
|
||||
"userId": req.Payload["userId"],
|
||||
},
|
||||
}
|
||||
reqChan <- &ZoneRequest{
|
||||
ReqType: string(NEW_AUTHORIZED_ZONE_MEMBER),
|
||||
From: req.Payload["userId"],
|
||||
Payload: map[string]interface{}{
|
||||
"userId": req.Payload["userId"],
|
||||
},
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
case err = <-e:
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(NEW_ZONE):
|
||||
logger.Println(req.Payload)
|
||||
if err := validateRequest(req.GetPayload(), "zoneId", "zoneName", "zoneImageURL", "zoneOwner", "zoneCreationDate"); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
zone, err := NewZone(zm.Manager.ID, req.Payload["zoneId"], req.Payload["zoneName"], req.Payload["zoneImageURL"], req.Payload["zoneOwner"], req.Payload["zoneCreationDate"], true, []string{req.Payload["zoneOwner"]})
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(zm.Manager.zoneFlag, func() (err error) {
|
||||
zm.Manager.Zones[zone.ID] = zone
|
||||
return
|
||||
})
|
||||
case string(ZONE_OFFER):
|
||||
if err := validateRequest(req.GetPayload(), FROM, TO, SDP); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := zm.Manager.HandleOffer(ctx, req.GetPayload(), zm.signalCandidate); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(ZONE_ANSWER):
|
||||
if err := validateRequest(req.GetPayload(), FROM, TO, SDP); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := zm.Manager.HandleAnswer(ctx, req.GetPayload()); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(ZONE_COUNTER_OFFER):
|
||||
if err := validateRequest(req.GetPayload(), FROM); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := zm.Manager.HandleCounterOffer(ctx, req.Payload); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(ZONE_WEBRTC_RENNEGOTIATION_OFFER):
|
||||
logger.Println("received negotiation offer")
|
||||
if err := validateRequest(req.GetPayload(), FROM, SDP); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := zm.Manager.HandleRennegotiationOffer(req.Payload[FROM], req.Payload[SDP]); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(ZONE_WEBRTC_RENNEGOTIATION_ANSWER):
|
||||
logger.Println("received negotiation answer")
|
||||
if err := validateRequest(req.GetPayload(), FROM, SDP); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if err := zm.Manager.HandleRennegotiationAnswer(req.Payload[FROM], req.Payload[SDP]); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
case string(ZONE_WEBRTC_CANDIDATE):
|
||||
if err := validateRequest(req.GetPayload(), FROM, "candidate", "sdpMlineIndex", "sdpMid"); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
logger.Println(req.Payload)
|
||||
i, err := strconv.Atoi(req.Payload["sdpMlineIndex"])
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
sdpMlineIndex := uint16(i)
|
||||
sdpMid := req.Payload["sdpMid"]
|
||||
logger.Println(sdpMid, sdpMlineIndex)
|
||||
if err := zm.Manager.AddCandidate(&webrtc.ICECandidateInit{
|
||||
Candidate: req.Payload["candidate"],
|
||||
SDPMid: &sdpMid,
|
||||
SDPMLineIndex: &sdpMlineIndex,
|
||||
}, req.Payload[FROM]); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
default:
|
||||
logger.Println("no matching request type for zone grpc middleware")
|
||||
logger.Println(req.GetPayload())
|
||||
logger.Println(req.Type)
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case err = <-errCh:
|
||||
return
|
||||
case <-done:
|
||||
return
|
||||
}
|
||||
}
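// Request dispatch pattern used by Process above: the type switch runs in its
// own goroutine and reports through done/errCh, while the caller's select also
// watches ctx so a cancelled stream unblocks immediately. A sketch of how a
// caller would drive it (variable names are illustrative):
//
//    mw := NewZoneGrpcMiddleware(manager)
//    if err := mw.Process(ctx, resp, stream); err != nil {
//        logger.Println("zone middleware:", err)
//    }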
|
||||
734
zoneManager.go
Normal file
@ -0,0 +1,734 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
sync "sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
LIST_ZONES_BY_HOST = "list_zones_by_host"
|
||||
LEAVE_ZONE = "leave_zone"
|
||||
)
|
||||
|
||||
type ZoneManager struct {
|
||||
ID string
|
||||
Zones map[string]*Zone
|
||||
LocalSD map[string]*webrtc.SessionDescription
|
||||
RTCPeerConnections map[string]*RTCPeerConnection
|
||||
DataChannels map[string]*DataChannel
|
||||
PendingCandidates map[string][]*webrtc.ICECandidate
|
||||
stream GrpcManager_LinkClient
|
||||
zoneFlag *uint32
|
||||
peerConnectionFlag *uint32
|
||||
localSDFlag *uint32
|
||||
dataChannelFlag *uint32
|
||||
candidateFlag *uint32
|
||||
}
|
||||
|
||||
type Zone struct {
|
||||
ID string
|
||||
Name string
|
||||
ImageURL string
|
||||
Owner string
|
||||
CreationDate string
|
||||
AuthorizedMembers []string
|
||||
DataChannels map[string]*DataChannel
|
||||
DataChannelsFlag *uint32
|
||||
ZoneRequestScheduler *ZoneRequestScheduler
|
||||
Initialized bool
|
||||
}
|
||||
|
||||
func NewZone(hostId string, zoneId string, zoneName string, imageUrl string, owner string, creationDate string, initialized bool, authorizedMembers []string) (zone *Zone, err error) {
|
||||
dataChannels, dataChannelFlag := make(map[string]*DataChannel), uint32(0)
|
||||
zoneChatHandler, err := NewZoneChatsHandler(zoneId, owner, authorizedMembers, dataChannels, &dataChannelFlag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
zoneUsersHandler, err := NewZoneUsersHandler(zoneId, owner, authorizedMembers, dataChannels, &dataChannelFlag)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
zoneAudioChannelsHandler, err := NewZoneAudioChannelsHandler(hostId, zoneId, owner, authorizedMembers, dataChannels, &dataChannelFlag)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
zoneVideoChannelsHandler, err := NewZoneVideoChannelsHandler(hostId, zoneId, owner, authorizedMembers, dataChannels, &dataChannelFlag)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
zoneFileHandler, err := NewZoneFileHandler(hostId, zoneId, owner, authorizedMembers, dataChannels, &dataChannelFlag)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
zoneScheduler, e := NewZoneRequestScheduler(authorizedMembers, zoneUsersHandler, zoneChatHandler, zoneAudioChannelsHandler, zoneVideoChannelsHandler, zoneFileHandler)
|
||||
go func() {
|
||||
for schedErr := range e {
|
||||
logger.Println("from scheduler :", schedErr)
|
||||
}
|
||||
}()
|
||||
zone = &Zone{
|
||||
ID: zoneId,
|
||||
Name: zoneName,
|
||||
ImageURL: imageUrl,
|
||||
Owner: owner,
|
||||
CreationDate: creationDate,
|
||||
Initialized: initialized,
|
||||
ZoneRequestScheduler: zoneScheduler,
|
||||
DataChannels: dataChannels,
|
||||
DataChannelsFlag: &dataChannelFlag,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func NewZoneManager(id string, token string) (zoneManager *ZoneManager, err error) {
|
||||
zoneFlag := uint32(0)
|
||||
peerConnectionFlag := uint32(0)
|
||||
localSDFlag := uint32(0)
|
||||
dataChannelFlag := uint32(0)
|
||||
candidateFlag := uint32(0)
|
||||
dataChannels := make(map[string]*DataChannel)
|
||||
zoneMap := make(map[string]*Zone)
|
||||
zones, err := zoneManager.fetchZones(id, token)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, zone := range zones {
|
||||
z, err := NewZone(id, zone.ID, zone.Name, zone.ImageURL, zone.Owner, zone.CreationDate, true, zone.AuthorizedMembers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
zoneMap[zone.ID] = z
|
||||
}
|
||||
logger.Println(zoneMap)
|
||||
zoneManager = &ZoneManager{
|
||||
ID: id,
|
||||
Zones: zoneMap,
|
||||
LocalSD: make(map[string]*webrtc.SessionDescription),
|
||||
RTCPeerConnections: make(map[string]*RTCPeerConnection),
|
||||
DataChannels: dataChannels,
|
||||
PendingCandidates: make(map[string][]*webrtc.ICECandidate),
|
||||
zoneFlag: &zoneFlag,
|
||||
peerConnectionFlag: &peerConnectionFlag,
|
||||
localSDFlag: &localSDFlag,
|
||||
dataChannelFlag: &dataChannelFlag,
|
||||
candidateFlag: &candidateFlag,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func atomicallyExecute(flag *uint32, job func() (err error)) (err error) {
|
||||
for {
|
||||
if atomic.CompareAndSwapUint32(flag, 0, 1) {
|
||||
defer atomic.SwapUint32(flag, 0)
|
||||
err = job()
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
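// atomicallyExecute above is a busy-wait lock: it spins until it wins a
// compare-and-swap on the flag, runs the job, then resets the flag in a defer,
// so at most one job per flag runs at a time. A minimal usage sketch:
//
//    flag := uint32(0)
//    _ = atomicallyExecute(&flag, func() error {
//        // critical section guarded by flag
//        return nil
//    })
//
// A sync.Mutex would give the same exclusion without spinning; the flag-based
// form is what the rest of the package relies on.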
|
||||
|
||||
func (zm *ZoneManager) DeleteZone(zoneId string) {}
|
||||
|
||||
func (zm *ZoneManager) fetchZones(nodeId string, token string) (zones []*Zone, err error) {
|
||||
body, err := json.Marshal(map[string]interface{}{
|
||||
"type": LIST_ZONES_BY_HOST,
|
||||
"token": token,
|
||||
"from": nodeId,
|
||||
"payload": map[string]string{
|
||||
"host": nodeId,
|
||||
"lastIndex": "0",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
res, err := http.Post("https://app.zippytal.com/req", "application/json", bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
logger.Println("error comes from the zone list request in zone manager")
|
||||
return
|
||||
}
|
||||
bs, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = json.Unmarshal(bs, &zones)
|
||||
return
|
||||
}
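// fetchZones above posts a list_zones_by_host request to the central server and
// decodes the response body as a JSON array of zones. The request body it sends
// has this shape (placeholder values are illustrative):
//
//    {
//      "type": "list_zones_by_host",
//      "token": "<node token>",
//      "from": "<node id>",
//      "payload": { "host": "<node id>", "lastIndex": "0" }
//    }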
|
||||
|
||||
func (zm *ZoneManager) CreateOffer(ctx context.Context, target string, from string, zoneId string, cb OnICECandidateFunc) (err error) {
|
||||
peerConnection, err := zm.createPeerConnection(target, from, zoneId, webrtc.SDPTypeOffer, cb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
logger.Println("connection created")
|
||||
rawOffer, err := peerConnection.CreateOffer(nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = peerConnection.SetLocalDescription(rawOffer); err != nil {
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(zm.peerConnectionFlag, func() (err error) {
|
||||
logger.Println("adding for target", target)
|
||||
zm.RTCPeerConnections[target] = &RTCPeerConnection{
|
||||
PeerConnection: peerConnection,
|
||||
makingOffer: true,
|
||||
makingOfferLock: &sync.Mutex{},
|
||||
negotiate: zm.negotiate,
|
||||
}
|
||||
return
|
||||
})
|
||||
err = zm.stream.Send(&Request{
|
||||
Type: string(ZONE_OFFER),
|
||||
From: zm.ID,
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"to": target,
|
||||
"from": zm.ID,
|
||||
"sdp": rawOffer.SDP,
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zm *ZoneManager) HandleOffer(ctx context.Context, req map[string]string, cb OnICECandidateFunc) (err error) {
|
||||
done, errCh := make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
if _, ok := zm.Zones[req["zoneId"]]; !ok {
|
||||
err = fmt.Errorf("no corresponding zone")
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
logger.Println("handling zone offer")
|
||||
peerConnection, err := zm.createPeerConnection(req[FROM], req[TO], req["zoneId"], webrtc.SDPTypeAnswer, cb)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
logger.Println("peer connection created")
|
||||
_ = atomicallyExecute(zm.peerConnectionFlag, func() (err error) {
|
||||
zm.RTCPeerConnections[req[FROM]] = &RTCPeerConnection{
|
||||
PeerConnection: peerConnection,
|
||||
makingOffer: false,
|
||||
makingOfferLock: &sync.Mutex{},
|
||||
negotiate: zm.negotiate,
|
||||
}
|
||||
return
|
||||
})
|
||||
offer := webrtc.SessionDescription{
|
||||
Type: webrtc.SDPTypeOffer,
|
||||
SDP: req[SDP],
|
||||
}
|
||||
if err = peerConnection.SetRemoteDescription(offer); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
rawAnswer, err := peerConnection.CreateAnswer(nil)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(zm.localSDFlag, func() (err error) {
|
||||
zm.LocalSD[req[FROM]] = &rawAnswer
|
||||
return
|
||||
})
|
||||
|
||||
_ = atomicallyExecute(zm.zoneFlag, func() (err error) {
|
||||
//zm.Zones[req[SQUAD_ID]].Members = append(zm.Squads[req[SQUAD_ID]].Members, req[FROM])
|
||||
return
|
||||
})
|
||||
if err = zm.stream.Send(&Request{
|
||||
Type: string(ZONE_ANSWER),
|
||||
From: zm.ID,
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"to": req[FROM],
|
||||
"from": zm.ID,
|
||||
"sdp": rawAnswer.SDP,
|
||||
},
|
||||
}); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case err = <-errCh:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (zm *ZoneManager) HandleAnswer(ctx context.Context, req map[string]string) (err error) {
|
||||
defer func() {
if r := recover(); r != nil {
logger.Printf("recover from panic in handle answer : %v\n", r)
}
}()
|
||||
if err = atomicallyExecute(zm.peerConnectionFlag, func() (err error) {
|
||||
if _, ok := zm.RTCPeerConnections[req[FROM]]; !ok {
|
||||
err = fmt.Errorf("no corresponding peer connection for id : %s", req[FROM])
|
||||
return
|
||||
}
|
||||
peerConnection := zm.RTCPeerConnections[req[FROM]]
logger.Println("---------------------")
logger.Println(req[SDP])
logger.Println("---------------------")
if err = peerConnection.SetRemoteDescription(webrtc.SessionDescription{
|
||||
Type: webrtc.SDPTypeAnswer,
|
||||
SDP: req[SDP],
|
||||
}); err != nil {
|
||||
logger.Println("error occured while setting remote description in handle answer")
|
||||
return
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
if err = zm.stream.Send(&Request{
|
||||
Type: string(ZONE_COUNTER_OFFER),
|
||||
From: zm.ID,
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"from": zm.ID,
|
||||
"to": req[FROM],
|
||||
},
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(zm.candidateFlag, func() (err error) {
|
||||
for _, candidate := range zm.PendingCandidates[req[FROM]] {
|
||||
logger.Println("sending candidate from answer to", req[FROM])
|
||||
if err = zm.stream.Send(&Request{
|
||||
Type: string(ZONE_WEBRTC_CANDIDATE),
|
||||
From: zm.ID,
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"from": zm.ID,
|
||||
"to": req[FROM],
|
||||
"candidate": candidate.ToJSON().Candidate,
|
||||
"sdpMid": *candidate.ToJSON().SDPMid,
|
||||
"sdpMlineIndex": strconv.Itoa(int(*candidate.ToJSON().SDPMLineIndex)),
|
||||
},
|
||||
}); err != nil {
|
||||
logger.Println(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
delete(zm.PendingCandidates, req[FROM])
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(zm.localSDFlag, func() (err error) {
|
||||
delete(zm.LocalSD, req[FROM])
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zm *ZoneManager) HandleCounterOffer(ctx context.Context, req map[string]string) (err error) {
|
||||
logger.Println("handling counter offer 1")
|
||||
if err = atomicallyExecute(zm.peerConnectionFlag, func() (err error) {
|
||||
logger.Println("start job")
|
||||
if _, ok := zm.RTCPeerConnections[req[FROM]]; !ok {
|
||||
logger.Println("error here")
|
||||
err = fmt.Errorf("no field corresponding peer connection for id %s", req[FROM])
|
||||
return
|
||||
}
|
||||
logger.Println("handling counter offer")
|
||||
connection := zm.RTCPeerConnections[req[FROM]]
|
||||
err = atomicallyExecute(zm.localSDFlag, func() (err error) {
|
||||
if err = connection.SetLocalDescription(*zm.LocalSD[req[FROM]]); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
logger.Println("handling counter offer 2")
|
||||
_ = atomicallyExecute(zm.candidateFlag, func() (err error) {
|
||||
for _, candidate := range zm.PendingCandidates[req[FROM]] {
|
||||
logger.Println("sending candidate to", req[FROM])
|
||||
if err = zm.stream.Send(&Request{
|
||||
Type: string(ZONE_WEBRTC_CANDIDATE),
|
||||
From: zm.ID,
|
||||
Token: "none",
|
||||
Payload: map[string]string{
|
||||
"from": zm.ID,
|
||||
"to": req[FROM],
|
||||
"candidate": candidate.ToJSON().Candidate,
|
||||
"sdpMid": *candidate.ToJSON().SDPMid,
|
||||
"sdpMlineIndex": strconv.Itoa(int(*candidate.ToJSON().SDPMLineIndex)),
|
||||
},
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
delete(zm.PendingCandidates, req[FROM])
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(zm.localSDFlag, func() (err error) {
|
||||
delete(zm.LocalSD, req[FROM])
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zm *ZoneManager) createPeerConnection(target string, from string, zoneId string, peerType webrtc.SDPType, cb OnICECandidateFunc) (peerConnection *webrtc.PeerConnection, err error) {
|
||||
defer func() {
if r := recover(); r != nil {
logger.Printf("recover from panic : %v\n", r)
}
}()
|
||||
config := webrtc.Configuration{
|
||||
ICEServers: []webrtc.ICEServer{
|
||||
{
|
||||
URLs: []string{"stun:stun.l.google.com:19302", "stun:stunserver.org:3478"},
|
||||
},
|
||||
},
|
||||
SDPSemantics: webrtc.SDPSemanticsUnifiedPlanWithFallback,
|
||||
}
|
||||
|
||||
peerConnection, err = webrtc.NewPeerConnection(config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
logger.Println("---------------------------------------------------")
|
||||
if peerType == webrtc.SDPTypeAnswer {
|
||||
maxRetransmits := uint16(100)
|
||||
channel, err := peerConnection.CreateDataChannel("data", &webrtc.DataChannelInit{
|
||||
MaxRetransmits: &maxRetransmits,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reqChan := make(chan *ZoneRequest)
|
||||
channel.OnOpen(func() {
|
||||
logger.Println(zoneId)
|
||||
if _, ok := zm.Zones[zoneId]; ok {
|
||||
logger.Println("this zone exist")
|
||||
_ = atomicallyExecute(zm.Zones[zoneId].DataChannelsFlag, func() (err error) {
|
||||
x := int32(0)
|
||||
zm.Zones[zoneId].DataChannels[target] = &DataChannel{DataChannel: channel, bufferedAmountLowThresholdReached: make(<-chan struct{}), l: &x}
|
||||
return
|
||||
})
|
||||
if _, ok := zm.Zones[zoneId]; !ok {
|
||||
err = fmt.Errorf("no corresponding zones")
|
||||
return
|
||||
}
|
||||
done, err := zm.Zones[zoneId].ZoneRequestScheduler.Schedule(reqChan)
|
||||
bs, jsonErr := json.Marshal(&ZoneResponse{
|
||||
Type: "user_zone_init",
|
||||
From: zoneId,
|
||||
To: target,
|
||||
Payload: map[string]interface{}{},
|
||||
})
|
||||
if jsonErr != nil {
|
||||
logger.Println("error in open channel", jsonErr)
|
||||
return
|
||||
}
|
||||
if sendErr := channel.SendText(string(bs)); sendErr != nil {
|
||||
logger.Println("error in open channel send", sendErr)
|
||||
return
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case <-err:
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
channel.OnClose(func() {
|
||||
close(reqChan)
|
||||
})
|
||||
channel.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
var req ZoneRequest
|
||||
if err := json.Unmarshal(msg.Data, &req); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
logger.Println("incoming request", req)
|
||||
reqChan <- &req
|
||||
})
|
||||
logger.Println("new channel for target : ", target)
|
||||
logger.Println(target)
|
||||
_ = atomicallyExecute(zm.dataChannelFlag, func() (err error) {
|
||||
l := int32(0)
|
||||
zm.DataChannels[target] = &DataChannel{
|
||||
DataChannel: channel,
|
||||
bufferedAmountLowThresholdReached: make(<-chan struct{}),
|
||||
l: &l,
|
||||
}
|
||||
return
|
||||
})
|
||||
} else {
|
||||
peerConnection.OnDataChannel(func(dc *webrtc.DataChannel) {
|
||||
_ = atomicallyExecute(zm.dataChannelFlag, func() (err error) {
|
||||
l := int32(0)
|
||||
zm.DataChannels[target] = &DataChannel{
|
||||
DataChannel: dc,
|
||||
l: &l,
|
||||
}
|
||||
return
|
||||
})
|
||||
reqChan := make(chan *ZoneRequest)
|
||||
dc.OnOpen(func() {
|
||||
logger.Println(zoneId)
|
||||
if _, ok := zm.Zones[zoneId]; ok {
|
||||
logger.Println("this zone exist")
|
||||
_ = atomicallyExecute(zm.Zones[zoneId].DataChannelsFlag, func() (err error) {
|
||||
x := int32(0)
|
||||
zm.Zones[zoneId].DataChannels[target] = &DataChannel{DataChannel: dc, bufferedAmountLowThresholdReached: make(<-chan struct{}), l: &x}
|
||||
return
|
||||
})
|
||||
done, err := zm.Zones[zoneId].ZoneRequestScheduler.Schedule(reqChan)
|
||||
bs, jsonErr := json.Marshal(&ZoneResponse{
|
||||
Type: "user_zone_init",
|
||||
From: zoneId,
|
||||
To: target,
|
||||
Payload: map[string]interface{}{},
|
||||
})
|
||||
if jsonErr != nil {
|
||||
logger.Println("error in open channel", jsonErr)
|
||||
return
|
||||
}
|
||||
if sendErr := dc.SendText(string(bs)); sendErr != nil {
|
||||
logger.Println("error in open channel send", sendErr)
|
||||
return
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case <-err:
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
dc.OnClose(func() {
|
||||
close(reqChan)
|
||||
})
|
||||
dc.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
var req ZoneRequest
|
||||
if err := json.Unmarshal(msg.Data, &req); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
logger.Println("incoming request", req)
|
||||
reqChan <- &req
|
||||
})
|
||||
})
|
||||
}
|
||||
peerConnection.OnConnectionStateChange(func(pcs webrtc.PeerConnectionState) {
|
||||
if pcs == webrtc.PeerConnectionStateClosed || pcs == webrtc.PeerConnectionStateDisconnected || pcs == webrtc.PeerConnectionStateFailed {
|
||||
logger.Println(pcs)
|
||||
if err = zm.HandleLeavingMember(target, zoneId); err != nil {
|
||||
logger.Println(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
peerConnection.OnNegotiationNeeded(func() {
|
||||
logger.Println("------------------- negotiation is needed --------------------")
|
||||
if pc, ok := zm.RTCPeerConnections[target]; ok {
|
||||
if pc.SignalingState() == webrtc.SignalingStateStable {
|
||||
localSd, err := pc.CreateOffer(nil)
|
||||
if err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if err = pc.SetLocalDescription(localSd); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if err = zm.stream.Send(&Request{
|
||||
Type: string(ZONE_WEBRTC_RENNEGOTIATION_OFFER),
|
||||
From: zm.ID,
|
||||
Token: "",
|
||||
Payload: map[string]string{
|
||||
"to": target,
|
||||
"sdp": localSd.SDP,
|
||||
},
|
||||
}); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
peerConnection.OnICEConnectionStateChange(func(is webrtc.ICEConnectionState) {
|
||||
logger.Printf("ICE connection state has changed %s\n", is.String())
|
||||
if is == webrtc.ICEConnectionStateDisconnected || is == webrtc.ICEConnectionStateFailed {
|
||||
logger.Println(is)
|
||||
}
|
||||
})
|
||||
peerConnection.OnICECandidate(func(i *webrtc.ICECandidate) {
|
||||
if i == nil {
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(zm.candidateFlag, func() (err error) {
|
||||
desc := peerConnection.RemoteDescription()
|
||||
if desc == nil {
|
||||
logger.Println("generated candidate appended to list : ", i)
|
||||
zm.PendingCandidates[target] = append(zm.PendingCandidates[target], i)
|
||||
} else {
|
||||
logger.Println("generated candidate : ", i)
|
||||
if iceCandidateErr := cb(target, i); iceCandidateErr != nil {
|
||||
logger.Println(iceCandidateErr)
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zm *ZoneManager) HandleRennegotiationOffer(from string, sdp string) (err error) {
|
||||
err = atomicallyExecute(zm.peerConnectionFlag, func() (err error) {
|
||||
if _, ok := zm.RTCPeerConnections[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding peer connection for id %s", from)
|
||||
return
|
||||
}
|
||||
zm.RTCPeerConnections[from].makingOfferLock.Lock()
|
||||
if zm.RTCPeerConnections[from].makingOffer {
|
||||
zm.RTCPeerConnections[from].makingOfferLock.Unlock()
|
||||
err = fmt.Errorf("already making an offer or state is stable")
|
||||
return
|
||||
}
|
||||
zm.RTCPeerConnections[from].makingOfferLock.Unlock()
|
||||
if err = zm.RTCPeerConnections[from].SetRemoteDescription(webrtc.SessionDescription{SDP: sdp, Type: webrtc.SDPTypeOffer}); err != nil {
|
||||
return
|
||||
}
|
||||
localSd, err := zm.RTCPeerConnections[from].CreateAnswer(nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = zm.RTCPeerConnections[from].SetLocalDescription(localSd); err != nil {
|
||||
return
|
||||
}
|
||||
if err = zm.stream.Send(&Request{
|
||||
Type: string(ZONE_WEBRTC_RENNEGOTIATION_ANSWER),
|
||||
From: zm.ID,
|
||||
Token: "",
|
||||
Payload: map[string]string{
|
||||
"to": from,
|
||||
"sdp": localSd.SDP,
|
||||
},
|
||||
}); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zm *ZoneManager) HandleRennegotiationAnswer(from string, sdp string) (err error) {
|
||||
_ = atomicallyExecute(zm.peerConnectionFlag, func() (err error) {
|
||||
if _, ok := zm.RTCPeerConnections[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding peer connection for id %s", from)
|
||||
return
|
||||
}
|
||||
err = zm.RTCPeerConnections[from].SetRemoteDescription(webrtc.SessionDescription{SDP: sdp, Type: webrtc.SDPTypeAnswer})
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zm *ZoneManager) AddCandidate(candidate *webrtc.ICECandidateInit, from string) (err error) {
|
||||
_ = atomicallyExecute(zm.candidateFlag, func() (err error) {
|
||||
if candidate != nil {
|
||||
if connection, ok := zm.RTCPeerConnections[from]; ok {
|
||||
err = connection.AddICECandidate(*candidate)
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zm *ZoneManager) HandleLeavingMember(id string, zoneId string) (err error) {
|
||||
if err = atomicallyExecute(zm.peerConnectionFlag, func() (err error) {
|
||||
if _, ok := zm.RTCPeerConnections[id]; !ok {
|
||||
err = fmt.Errorf("no corresponding peerconnection for id %s", id)
|
||||
return
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
if err = atomicallyExecute(zm.zoneFlag, func() (err error) {
|
||||
if zone, ok := zm.Zones[zoneId]; ok {
|
||||
for _, handlersPublishers := range zone.ZoneRequestScheduler.handlersPublishers {
|
||||
handlersPublishers <- &ZoneRequest{
|
||||
ReqType: LEAVE_ZONE,
|
||||
From: "node",
|
||||
Payload: map[string]interface{}{
|
||||
"userId": id,
|
||||
},
|
||||
}
|
||||
}
|
||||
if err = atomicallyExecute(zone.DataChannelsFlag, func() (err error) {
|
||||
defer delete(zone.DataChannels, id)
|
||||
if dataChannel, ok := zone.DataChannels[id]; ok {
|
||||
if err = dataChannel.DataChannel.Close(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("no corresponding zone for zoneId %s", zoneId)
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
if err = atomicallyExecute(zm.peerConnectionFlag, func() (err error) {
|
||||
if _, ok := zm.RTCPeerConnections[id]; ok {
|
||||
defer delete(zm.RTCPeerConnections, id)
|
||||
if err = zm.RTCPeerConnections[id].Close(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (zm *ZoneManager) negotiate(target string, zoneId string) {
|
||||
_ = atomicallyExecute(zm.peerConnectionFlag, func() (err error) {
|
||||
if _, ok := zm.RTCPeerConnections[target]; !ok {
|
||||
return
|
||||
}
|
||||
zm.RTCPeerConnections[target].makingOfferLock.Lock()
|
||||
zm.RTCPeerConnections[target].makingOffer = true
|
||||
zm.RTCPeerConnections[target].makingOfferLock.Unlock()
|
||||
defer func() {
|
||||
zm.RTCPeerConnections[target].makingOfferLock.Lock()
|
||||
zm.RTCPeerConnections[target].makingOffer = false
|
||||
zm.RTCPeerConnections[target].makingOfferLock.Unlock()
|
||||
}()
|
||||
return
|
||||
})
|
||||
}
|
||||
1
zoneMediaHandler.go
Normal file
@ -0,0 +1 @@
package localserver
137
zoneRequestScheduler.go
Normal file
@ -0,0 +1,137 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type ZoneRequestScheduler struct {
|
||||
handlersPublishers []chan<- *ZoneRequest
|
||||
}
|
||||
|
||||
type ZoneRequest struct {
|
||||
ReqType string `json:"reqType"`
|
||||
Payload map[string]interface{} `json:"payload"`
|
||||
From string `json:"from"`
|
||||
}
|
||||
|
||||
type ZoneResponse struct {
|
||||
Type string `json:"type"`
|
||||
To string `json:"to"`
|
||||
From string `json:"from"`
|
||||
Payload map[string]interface{} `json:"payload"`
|
||||
}
|
||||
|
||||
type ZoneRequestHandler interface {
|
||||
Init(ctx context.Context, authorizedMembers []string) (err error)
|
||||
Subscribe(ctx context.Context, publisher <-chan *ZoneRequest) (reqChan chan *ZoneRequest, done chan struct{}, errCh chan error)
|
||||
handleZoneRequest(ctx context.Context, req *ZoneRequest) (err error)
|
||||
}
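// Any zone feature (users, chats, audio/video channels, files) plugs into the
// scheduler by satisfying ZoneRequestHandler above. A minimal sketch of a
// conforming handler (hypothetical type, shown only to illustrate the contract):
//
//    type noopHandler struct{}
//
//    func (h *noopHandler) Init(ctx context.Context, authorizedMembers []string) error { return nil }
//
//    func (h *noopHandler) Subscribe(ctx context.Context, publisher <-chan *ZoneRequest) (chan *ZoneRequest, chan struct{}, chan error) {
//        reqChan, done, errCh := make(chan *ZoneRequest), make(chan struct{}), make(chan error)
//        go func() {
//            for req := range publisher {
//                if err := h.handleZoneRequest(ctx, req); err != nil {
//                    errCh <- err
//                }
//            }
//            done <- struct{}{}
//        }()
//        return reqChan, done, errCh
//    }
//
//    func (h *noopHandler) handleZoneRequest(ctx context.Context, req *ZoneRequest) error { return nil }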
|
||||
|
||||
func verifyFieldsString(payload map[string]interface{}, fields ...string) (err error) {
|
||||
for _, field := range fields {
|
||||
if _, ok := payload[field]; !ok {
|
||||
err = fmt.Errorf("no field %s in payload", field)
|
||||
return
|
||||
} else if _, ok := payload[field].(string); !ok {
|
||||
err = fmt.Errorf("field %s in payload is not a string", field)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func verifyFieldsBool(payload map[string]interface{}, fields ...string) (err error) {
|
||||
for _, field := range fields {
|
||||
if _, ok := payload[field]; !ok {
|
||||
err = fmt.Errorf("no field %s in payload", field)
|
||||
return
|
||||
} else if _, ok := payload[field].(bool); !ok {
|
||||
err = fmt.Errorf("field %s in payload is not a bool", field)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func verifyFieldsFloat64(payload map[string]interface{}, fields ...string) (err error) {
|
||||
for _, field := range fields {
|
||||
if _, ok := payload[field]; !ok {
|
||||
err = fmt.Errorf("no field %s in payload", field)
|
||||
return
|
||||
} else if _, ok := payload[field].(float64); !ok {
|
||||
err = fmt.Errorf("field %s in payload is not a float64", field)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func verifyFieldsSliceInterface(payload map[string]interface{}, fields ...string) (err error) {
|
||||
for _, field := range fields {
|
||||
if _, ok := payload[field]; !ok {
|
||||
err = fmt.Errorf("no field %s in payload", field)
|
||||
return
|
||||
} else if _, ok := payload[field].([]interface{}); !ok {
|
||||
err = fmt.Errorf("field %s in payload is not a []interface{}", field)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
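// The verifyFields* helpers above are the payload type guards used before
// reading values out of a ZoneRequest. A short usage sketch (payload contents
// are illustrative):
//
//    payload := map[string]interface{}{"userId": "u1", "admin": true}
//    if err := verifyFieldsString(payload, "userId"); err != nil {
//        return err
//    }
//    if err := verifyFieldsBool(payload, "admin"); err != nil {
//        return err
//    }
//    userId := payload["userId"].(string)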
|
||||
|
||||
func NewZoneRequestScheduler(authorizedMembers []string, handlers ...ZoneRequestHandler) (zoneRequestScheduler *ZoneRequestScheduler, handlerErrCh chan error) {
|
||||
zoneRequestScheduler = new(ZoneRequestScheduler)
|
||||
zoneRequestScheduler.handlersPublishers = make([]chan<- *ZoneRequest, 0)
|
||||
handlerErrCh = make(chan error)
|
||||
reqChans := []chan *ZoneRequest{}
|
||||
for _, handler := range handlers {
|
||||
publisher := make(chan *ZoneRequest)
|
||||
zoneRequestScheduler.handlersPublishers = append(zoneRequestScheduler.handlersPublishers, publisher)
|
||||
reqChan, done, errCh := handler.Subscribe(context.Background(), publisher)
|
||||
go func(done <-chan struct{}, errCh <-chan error) {
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case handlerErrCh <- <-errCh:
|
||||
}
|
||||
}
|
||||
}(done, errCh)
|
||||
reqChans = append(reqChans, reqChan)
|
||||
}
|
||||
|
||||
for _, reqChan := range reqChans {
|
||||
go func(reqChan <-chan *ZoneRequest) {
|
||||
done, errCh := zoneRequestScheduler.Schedule(reqChan)
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case e := <-errCh:
|
||||
logger.Println("from there", e)
|
||||
}
|
||||
}
|
||||
}(reqChan)
|
||||
}
|
||||
for i, handler := range handlers {
|
||||
if ierr := handler.Init(context.Background(), authorizedMembers); ierr != nil {
|
||||
logger.Println(ierr)
|
||||
}
|
||||
logger.Println("init done for handler", i)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (zrs *ZoneRequestScheduler) Schedule(reqChan <-chan *ZoneRequest) (done chan struct{}, errCh chan error) {
|
||||
done, errCh = make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
for req := range reqChan {
|
||||
for _, publisher := range zrs.handlersPublishers {
|
||||
publisher <- req
|
||||
}
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
return
|
||||
}
|
||||
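NewZoneRequestScheduler above subscribes every handler, drains handler errors onto a single channel, and re-broadcasts each incoming request to all handlers. A hedged wiring sketch in the same package, assuming the *ZoneUsersHandler defined later in this commit and placeholder arguments (zoneId, owner, members, dataChannels, flag):
// Hypothetical helper, not part of this commit: wires a users handler into the
// scheduler and logs any handler errors. All arguments are placeholders.
func startZoneScheduling(zoneId, owner string, members []string, dataChannels map[string]*DataChannel, flag *uint32) (*ZoneRequestScheduler, error) {
	usersHandler, err := NewZoneUsersHandler(zoneId, owner, members, dataChannels, flag)
	if err != nil {
		return nil, err
	}
	// NewZoneRequestScheduler calls Subscribe and then Init on every handler it is given.
	scheduler, errCh := NewZoneRequestScheduler(members, usersHandler)
	go func() {
		// Handler errors are multiplexed onto a single channel; log and keep going.
		for e := range errCh {
			logger.Println("zone handler error:", e)
		}
	}()
	return scheduler, nil
}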
209
zoneUsersDBHandler.go
Normal file
209
zoneUsersDBHandler.go
Normal file
@ -0,0 +1,209 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/dgraph-io/badger/v3"
|
||||
)
|
||||
|
||||
type User struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
ChatRights string `json:"chatRights"`
|
||||
AudioChannelRights string `json:"audioChannelRights"`
|
||||
VideoChannelRights string `json:"videoChannelRights"`
|
||||
MediaRights string `json:"mediaRights"`
|
||||
FileRights string `json:"fileRights"`
|
||||
KnownChatsId []string `json:"knownChatsId"`
|
||||
KnownAudioChannelsId []string `json:"knownAudioChannelsId"`
|
||||
KnownVideoChannelsId []string `json:"knownVideoChannelsId"`
|
||||
KnownMediaFolderId []string `json:"knownMediaFolderId"`
|
||||
KnownFileFolderId []string `json:"knownFileFolderId"`
|
||||
Admin bool `json:"admin"`
|
||||
Owner bool `json:"owner"`
|
||||
}
|
||||
|
||||
type ZoneUsersDBHandler struct {
|
||||
ZoneID string
|
||||
db func(func(*badger.DB) (err error)) (err error)
|
||||
}
|
||||
|
||||
func NewZoneUsersDBHandler(zoneId string, owner string, authorizedMembers []string, init bool) (zoneUsersDBHandler *ZoneUsersDBHandler, err error) {
|
||||
db := func(f func(*badger.DB) (err error)) (err error) {
|
||||
db, err := badger.Open(badger.DefaultOptions(filepath.Join("data", "zones", zoneId, "users")).WithLogger(dbLogger))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
err = f(db)
|
||||
return
|
||||
}
|
||||
if init {
|
||||
admin := &User{
|
||||
ID: owner,
|
||||
Name: owner,
|
||||
ChatRights: "",
|
||||
AudioChannelRights: "",
|
||||
VideoChannelRights: "",
|
||||
MediaRights: "",
|
||||
FileRights: "",
|
||||
KnownChatsId: []string{},
|
||||
KnownAudioChannelsId: []string{},
|
||||
KnownVideoChannelsId: []string{},
|
||||
KnownMediaFolderId: make([]string, 0),
|
||||
KnownFileFolderId: make([]string, 0),
|
||||
Admin: true,
|
||||
Owner: true,
|
||||
}
|
||||
bs, jsonErr := json.Marshal(admin)
|
||||
if jsonErr != nil {
|
||||
return nil, jsonErr
|
||||
}
|
||||
if err = db(func(d *badger.DB) (err error) {
|
||||
err = d.Update(func(txn *badger.Txn) (err error) {
|
||||
err = txn.Set([]byte(owner), bs)
|
||||
return
|
||||
})
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if err = db(func(d *badger.DB) (err error) {
|
||||
err = d.Update(func(txn *badger.Txn) (err error) {
|
||||
for _, member := range authorizedMembers {
|
||||
if member != owner {
|
||||
if _, berr := txn.Get([]byte(member)); berr == badger.ErrKeyNotFound {
|
||||
logger.Println("creating new user", member)
|
||||
user := &User{
|
||||
ID: member,
|
||||
Name: member,
|
||||
ChatRights: "",
|
||||
AudioChannelRights: "",
|
||||
VideoChannelRights: "",
|
||||
MediaRights: "",
|
||||
FileRights: "",
|
||||
KnownChatsId: []string{},
|
||||
KnownAudioChannelsId: []string{},
|
||||
KnownVideoChannelsId: []string{},
|
||||
KnownMediaFolderId: make([]string, 0),
|
||||
KnownFileFolderId: make([]string, 0),
|
||||
Admin: false,
|
||||
Owner: false,
|
||||
}
|
||||
bs, jsonErr := json.Marshal(user)
|
||||
if jsonErr != nil {
|
||||
return jsonErr
|
||||
}
|
||||
if err = txn.Set([]byte(member), bs); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
zoneUsersDBHandler = &ZoneUsersDBHandler{
|
||||
db: db,
|
||||
ZoneID: zoneId,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneUsersDBHandler) AddNewUser(user *User) (err error) {
|
||||
bs, err := json.Marshal(user)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = zcdbh.db(func(d *badger.DB) (err error) {
|
||||
err = d.Update(func(txn *badger.Txn) error {
|
||||
if _, notFoundErr := txn.Get([]byte(user.ID)); notFoundErr == badger.ErrKeyNotFound {
|
||||
if updateErr := txn.Set([]byte(user.ID), bs); updateErr != nil {
|
||||
return updateErr
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneUsersDBHandler) DeleteUser(id string) (err error) {
|
||||
err = zcdbh.db(func(d *badger.DB) (err error) {
|
||||
err = d.Update(func(txn *badger.Txn) (err error) {
|
||||
err = txn.Delete([]byte(id))
|
||||
return
|
||||
})
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneUsersDBHandler) ListUsers(lastIndex int, limit int) (users []*User, err error) {
|
||||
err = zcdbh.db(func(d *badger.DB) (err error) {
|
||||
err = d.View(func(txn *badger.Txn) error {
|
||||
opt := badger.DefaultIteratorOptions
|
||||
opt.Reverse = true
|
||||
it := txn.NewIterator(opt)
|
||||
defer it.Close()
|
||||
users = make([]*User, 0)
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
item := it.Item()
|
||||
if err = item.Value(func(val []byte) error {
|
||||
var user *User
|
||||
if jsonErr := json.Unmarshal(val, &user); jsonErr != nil {
|
||||
return jsonErr
|
||||
}
|
||||
users = append(users, user)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneUsersDBHandler) GetUser(id string) (user *User, err error) {
|
||||
err = zcdbh.db(func(d *badger.DB) (err error) {
|
||||
err = d.View(func(txn *badger.Txn) error {
|
||||
item, err := txn.Get([]byte(id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = item.Value(func(val []byte) (err error) {
|
||||
err = json.Unmarshal(val, &user)
|
||||
return
|
||||
})
|
||||
return err
|
||||
})
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zcdbh *ZoneUsersDBHandler) ModifyUser(id string, user *User) (err error) {
|
||||
bs, err := json.Marshal(user)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = zcdbh.db(func(d *badger.DB) (err error) {
|
||||
err = d.Update(func(txn *badger.Txn) (err error) {
|
||||
err = txn.Set([]byte(id), bs)
|
||||
return
|
||||
})
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
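The handler above opens the Badger database on every call, so each method is self-contained. A hedged usage sketch (the function name is hypothetical) that renames an existing user by reading it back, mutating it, and writing it again:
// Hypothetical usage sketch, not part of this commit.
func renameZoneUser(db *ZoneUsersDBHandler, userId string, newName string) error {
	user, err := db.GetUser(userId)
	if err != nil {
		return err // typically badger.ErrKeyNotFound if the user does not exist
	}
	user.Name = newName
	// ModifyUser overwrites the stored JSON blob for this key.
	return db.ModifyUser(userId, user)
}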
534
zoneUsersHandler.go
Normal file
534
zoneUsersHandler.go
Normal file
@ -0,0 +1,534 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
const (
|
||||
LIST_ZONE_MEMBERS = "list_zone_members"
|
||||
LIST_ZONE_MEMBERS_RESPONSE = "list_zone_members_response"
|
||||
GET_USER = "get_user"
|
||||
MODIFY_USER_CHAT_RIGHTS = "modify_user_chat_rights"
|
||||
ADD_KNOWN_CHAT = "add_known_chat"
|
||||
REMOVE_KNOWN_CHAT = "remove_known_chat"
|
||||
ADD_KNOWN_AUDIO_CHANNEL = "add_known_audio_channel"
|
||||
REMOVE_KNOWN_AUDIO_CHANNEL = "remove_known_audio_channel"
|
||||
ADD_KNOWN_VIDEO_CHANNEL = "add_known_video_channel"
|
||||
REMOVE_KNOWN_VIDEO_CHANNEL = "remove_known_video_channel"
|
||||
ADD_USER = "add_user"
|
||||
REMOVE_USER = "remove_user"
|
||||
)
|
||||
|
||||
const (
|
||||
NEW_ZONE_USER = "new_zone_user"
|
||||
REMOVED_ZONE_USER = "removed_zone_user"
|
||||
)
|
||||
|
||||
type ZoneUsersHandler struct {
|
||||
ZoneId string
|
||||
ZoneMembersId []string
|
||||
DataChannels map[string]*DataChannel
|
||||
Flag *uint32
|
||||
Publishers []<-chan *ZoneRequest
|
||||
DB *ZoneUsersDBHandler
|
||||
reqChans []chan<- *ZoneRequest
|
||||
}
|
||||
|
||||
type ZoneUserConfig struct {
|
||||
DefaultChatsRights string `json:"defaultChatsRights"`
|
||||
DefaultAudioChannelsRights string `json:"defaultAudioChannelsRights"`
|
||||
DefaultVideoChannelsRights string `json:"defaultVideoChannelsRights"`
|
||||
DefaultMediaRights string `json:"defaultMediaRights"`
|
||||
DefaultFileRights string `json:"defaultFileRights"`
|
||||
AdminRights string `json:"adminRights"`
|
||||
}
|
||||
|
||||
func NewZoneUsersHandler(zoneId string, owner string, authorizedMembers []string, dataChannels map[string]*DataChannel, flag *uint32) (zoneUsersHandler *ZoneUsersHandler, err error) {
|
||||
_, err = os.ReadDir(filepath.Join("data", "zones", zoneId, "users"))
|
||||
var zoneUsersDBHandler *ZoneUsersDBHandler
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
logger.Printf("creating chat directory for zone %s...\n", zoneId)
|
||||
mkdirErr := os.MkdirAll(filepath.Join("data", "zones", zoneId, "users"), 0700)
|
||||
if mkdirErr != nil {
|
||||
return nil, mkdirErr
|
||||
}
|
||||
zoneUsersDBHandler, err = NewZoneUsersDBHandler(zoneId, owner, authorizedMembers, true)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
file, ferr := os.Create(filepath.Join("data", "zones", zoneId, "users", "usersConfig.json"))
|
||||
if ferr != nil {
|
||||
return nil, ferr
|
||||
}
|
||||
baseConfig := ZoneUserConfig{
|
||||
DefaultChatsRights: "",
|
||||
DefaultAudioChannelsRights: "",
|
||||
DefaultVideoChannelsRights: "",
|
||||
DefaultMediaRights: "",
|
||||
DefaultFileRights: "",
|
||||
AdminRights: "",
|
||||
}
|
||||
bs, jsonErr := json.Marshal(baseConfig)
|
||||
if jsonErr != nil {
|
||||
return nil, jsonErr
|
||||
}
|
||||
if _, writeErr := file.WriteString(string(bs)); writeErr != nil {
|
||||
return nil, writeErr
|
||||
}
|
||||
_ = file.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
zoneUsersDBHandler, err = NewZoneUsersDBHandler(zoneId, owner, authorizedMembers, false)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
zoneUsersHandler = &ZoneUsersHandler{
|
||||
ZoneId: zoneId,
|
||||
DataChannels: dataChannels,
|
||||
ZoneMembersId: authorizedMembers,
|
||||
Flag: flag,
|
||||
DB: zoneUsersDBHandler,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) Init(ctx context.Context, authorizedMembers []string) (err error) {
|
||||
users, err := zuh.DB.ListUsers(0, 1000)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
logger.Println(authorizedMembers)
|
||||
for _, user := range users {
|
||||
var contain bool
|
||||
for _, member := range authorizedMembers {
|
||||
if user.ID == member {
|
||||
contain = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !contain {
|
||||
logger.Println("userId", user.ID)
|
||||
if rerr := zuh.RemoveUser(user.ID); rerr != nil {
|
||||
logger.Println(rerr)
|
||||
}
|
||||
go func(userId string) {
|
||||
for _, rc := range zuh.reqChans {
|
||||
logger.Println("------------------------ sending to req chan ------------------------")
|
||||
rc <- &ZoneRequest{
|
||||
ReqType: string(REMOVED_ZONE_AUTHORIZED_MEMBER),
|
||||
From: "node",
|
||||
Payload: map[string]interface{}{
|
||||
"userId": userId,
|
||||
},
|
||||
}
|
||||
}
|
||||
}(user.ID)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) Subscribe(ctx context.Context, publisher <-chan *ZoneRequest) (reqChan chan *ZoneRequest, done chan struct{}, errCh chan error) {
|
||||
reqChan, done, errCh = make(chan *ZoneRequest), make(chan struct{}), make(chan error)
|
||||
zuh.reqChans = append(zuh.reqChans, reqChan)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
done <- struct{}{}
|
||||
return
|
||||
case req := <-publisher:
|
||||
if err := zuh.handleZoneRequest(ctx, req); err != nil {
|
||||
errCh <- err
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) AddUser(userId string) (err error) {
|
||||
newUser := &User{
|
||||
ID: userId,
|
||||
Name: userId,
|
||||
ChatRights: "",
|
||||
AudioChannelRights: "",
|
||||
VideoChannelRights: "",
|
||||
MediaRights: "",
|
||||
FileRights: "",
|
||||
KnownChatsId: []string{},
|
||||
KnownAudioChannelsId: []string{},
|
||||
KnownVideoChannelsId: make([]string, 0),
|
||||
KnownMediaFolderId: make([]string, 0),
|
||||
KnownFileFolderId: make([]string, 0),
|
||||
}
|
||||
if err = zuh.DB.AddNewUser(newUser); err != nil {
|
||||
return
|
||||
}
|
||||
err = atomicallyExecute(zuh.Flag, func() (err error) {
|
||||
users, err := zuh.DB.ListUsers(0, 10000)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
bs, jsonErr := json.Marshal(&ZoneResponse{
|
||||
Type: NEW_ZONE_USER,
|
||||
From: "node",
|
||||
To: "user",
|
||||
Payload: map[string]interface{}{
|
||||
"user": newUser,
|
||||
},
|
||||
})
|
||||
if jsonErr != nil {
|
||||
return jsonErr
|
||||
}
|
||||
for _, user := range users {
|
||||
if _, ok := zuh.DataChannels[user.ID]; ok {
|
||||
_ = zuh.DataChannels[user.ID].DataChannel.SendText(string(bs))
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) RemoveUser(userId string) (err error) {
|
||||
if err = zuh.DB.DeleteUser(userId); err != nil {
|
||||
return
|
||||
}
|
||||
err = atomicallyExecute(zuh.Flag, func() (err error) {
|
||||
users, err := zuh.DB.ListUsers(0, 10000)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
bs, jsonErr := json.Marshal(&ZoneResponse{
|
||||
Type: REMOVED_ZONE_USER,
|
||||
From: "node",
|
||||
To: "user",
|
||||
Payload: map[string]interface{}{
|
||||
"userId": userId,
|
||||
},
|
||||
})
|
||||
if jsonErr != nil {
|
||||
return jsonErr
|
||||
}
|
||||
if _, ok := zuh.DataChannels[userId]; ok {
|
||||
_ = zuh.DataChannels[userId].DataChannel.SendText(string(bs))
|
||||
}
|
||||
for _, user := range users {
|
||||
if _, ok := zuh.DataChannels[user.ID]; ok {
|
||||
_ = zuh.DataChannels[user.ID].DataChannel.SendText(string(bs))
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) AddKnownChat(chatId string, userId string) (err error) {
|
||||
user, err := zuh.DB.GetUser(userId)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, id := range user.KnownChatsId {
|
||||
if id == chatId {
|
||||
err = fmt.Errorf("user already know channel %s", chatId)
|
||||
return
|
||||
}
|
||||
}
|
||||
user.KnownChatsId = append(user.KnownChatsId, chatId)
|
||||
if err = zuh.DB.ModifyUser(userId, user); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) RemoveKnownChat(chatId string, userId string) (err error) {
|
||||
user, err := zuh.DB.GetUser(userId)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var index int
|
||||
var contain bool
|
||||
for i, id := range user.KnownChatsId {
|
||||
if id == chatId {
|
||||
index = i
|
||||
contain = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !contain {
|
||||
err = fmt.Errorf("does not know this chat")
|
||||
return
|
||||
}
|
||||
if len(user.KnownChatsId) < 2 {
|
||||
user.KnownChatsId = make([]string, 0)
|
||||
} else {
|
||||
user.KnownChatsId = append(user.KnownChatsId[:index], user.KnownChatsId[index+1:]...)
|
||||
}
|
||||
err = zuh.DB.ModifyUser(userId, user)
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) AddKnownAudioChannel(channelId string, userId string) (err error) {
|
||||
logger.Println("added new audio channel", channelId, userId)
|
||||
user, err := zuh.DB.GetUser(userId)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, id := range user.KnownAudioChannelsId {
|
||||
if id == channelId {
|
||||
err = fmt.Errorf("user already know channel %s", channelId)
|
||||
return
|
||||
}
|
||||
}
|
||||
user.KnownAudioChannelsId = append(user.KnownAudioChannelsId, channelId)
|
||||
if err = zuh.DB.ModifyUser(userId, user); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) RemoveKnownAudioChannel(channelId string, userId string) (err error) {
|
||||
user, err := zuh.DB.GetUser(userId)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var index int
|
||||
var contain bool
|
||||
for i, id := range user.KnownAudioChannelsId {
|
||||
if id == channelId {
|
||||
index = i
|
||||
contain = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !contain {
|
||||
err = fmt.Errorf("does not know this chat")
|
||||
return
|
||||
}
|
||||
if len(user.KnownAudioChannelsId) < 2 {
|
||||
user.KnownAudioChannelsId = make([]string, 0)
|
||||
} else {
|
||||
user.KnownAudioChannelsId = append(user.KnownAudioChannelsId[:index], user.KnownAudioChannelsId[index+1:]...)
|
||||
}
|
||||
err = zuh.DB.ModifyUser(userId, user)
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) AddKnownVideoChannel(channelId string, userId string) (err error) {
|
||||
logger.Println("added new audio channel", channelId, userId)
|
||||
user, err := zuh.DB.GetUser(userId)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, id := range user.KnownVideoChannelsId {
|
||||
if id == channelId {
|
||||
err = fmt.Errorf("user already know channel %s", channelId)
|
||||
return
|
||||
}
|
||||
}
|
||||
user.KnownVideoChannelsId = append(user.KnownVideoChannelsId, channelId)
|
||||
if err = zuh.DB.ModifyUser(userId, user); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) RemoveKnownVideoChannel(channelId string, userId string) (err error) {
|
||||
user, err := zuh.DB.GetUser(userId)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var index int
|
||||
var contain bool
|
||||
for i, id := range user.KnownVideoChannelsId {
|
||||
if id == channelId {
|
||||
index = i
|
||||
contain = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !contain {
|
||||
err = fmt.Errorf("does not know this chat")
|
||||
return
|
||||
}
|
||||
if len(user.KnownVideoChannelsId) < 2 {
|
||||
user.KnownVideoChannelsId = make([]string, 0)
|
||||
} else {
|
||||
user.KnownVideoChannelsId = append(user.KnownVideoChannelsId[:index], user.KnownVideoChannelsId[index+1:]...)
|
||||
}
|
||||
err = zuh.DB.ModifyUser(userId, user)
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) AddKnownFolder(folderId string, userId string) (err error) {
|
||||
logger.Println("added new known folder", folderId, userId)
|
||||
user, err := zuh.DB.GetUser(userId)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, id := range user.KnownFileFolderId {
|
||||
if id == folderId {
|
||||
err = fmt.Errorf("user already know channel %s", folderId)
|
||||
return
|
||||
}
|
||||
}
|
||||
user.KnownFileFolderId = append(user.KnownFileFolderId, folderId)
|
||||
if err = zuh.DB.ModifyUser(userId, user); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) RemoveKnownFolder(folderId string, userId string) (err error) {
|
||||
user, err := zuh.DB.GetUser(userId)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var index int
|
||||
var contain bool
|
||||
for i, id := range user.KnownFileFolderId {
|
||||
if id == folderId {
|
||||
index = i
|
||||
contain = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !contain {
|
||||
err = fmt.Errorf("does not know this chat")
|
||||
return
|
||||
}
|
||||
if len(user.KnownFileFolderId) < 2 {
|
||||
user.KnownFileFolderId = make([]string, 0)
|
||||
} else {
|
||||
user.KnownFileFolderId = append(user.KnownFileFolderId[:index], user.KnownFileFolderId[index+1:]...)
|
||||
}
|
||||
err = zuh.DB.ModifyUser(userId, user)
|
||||
return
|
||||
}
|
||||
|
||||
func (zuh *ZoneUsersHandler) handleZoneRequest(ctx context.Context, req *ZoneRequest) (err error) {
|
||||
logger.Println("got request in zone users handler", req)
|
||||
switch req.ReqType {
|
||||
case REMOVE_USER:
|
||||
if err = verifyFieldsString(req.Payload, "userId"); err != nil {
|
||||
return
|
||||
}
|
||||
err = zuh.RemoveUser(req.Payload["userId"].(string))
|
||||
case ADD_USER:
|
||||
if err = verifyFieldsString(req.Payload, "userId"); err != nil {
|
||||
return
|
||||
}
|
||||
err = zuh.AddUser(req.Payload["userId"].(string))
|
||||
case REMOVE_KNOWN_CHAT:
|
||||
if err = verifyFieldsString(req.Payload, "chatId", "userId"); err != nil {
|
||||
return
|
||||
}
|
||||
err = zuh.RemoveKnownChat(req.Payload["chatId"].(string), req.Payload["userId"].(string))
|
||||
case ADD_KNOWN_CHAT:
|
||||
if err = verifyFieldsString(req.Payload, "chatId", "userId"); err != nil {
|
||||
return
|
||||
}
|
||||
err = zuh.AddKnownChat(req.Payload["chatId"].(string), req.Payload["userId"].(string))
|
||||
case REMOVE_KNOWN_AUDIO_CHANNEL:
|
||||
if err = verifyFieldsString(req.Payload, "channelId", "userId"); err != nil {
|
||||
return
|
||||
}
|
||||
err = zuh.RemoveKnownAudioChannel(req.Payload["channelId"].(string), req.Payload["userId"].(string))
|
||||
case ADD_KNOWN_AUDIO_CHANNEL:
|
||||
if err = verifyFieldsString(req.Payload, "channelId", "userId"); err != nil {
|
||||
return
|
||||
}
|
||||
err = zuh.AddKnownAudioChannel(req.Payload["channelId"].(string), req.Payload["userId"].(string))
|
||||
case REMOVE_KNOWN_VIDEO_CHANNEL:
|
||||
if err = verifyFieldsString(req.Payload, "channelId", "userId"); err != nil {
|
||||
return
|
||||
}
|
||||
err = zuh.RemoveKnownVideoChannel(req.Payload["channelId"].(string), req.Payload["userId"].(string))
|
||||
case ADD_KNOWN_VIDEO_CHANNEL:
|
||||
if err = verifyFieldsString(req.Payload, "channelId", "userId"); err != nil {
|
||||
return
|
||||
}
|
||||
err = zuh.AddKnownVideoChannel(req.Payload["channelId"].(string), req.Payload["userId"].(string))
|
||||
case GET_USER:
|
||||
if err = verifyFieldsString(req.Payload, "userId"); err != nil {
|
||||
return
|
||||
}
|
||||
if _, ok := req.Payload["init"]; !ok {
|
||||
err = fmt.Errorf("no field init in req payload for get user")
|
||||
return
|
||||
}
|
||||
if _, ok := req.Payload["init"].(bool); !ok {
|
||||
err = fmt.Errorf("field init is of wrong type")
|
||||
return
|
||||
}
|
||||
user, err := zuh.DB.GetUser(req.Payload["userId"].(string))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var answer *ZoneResponse
|
||||
if req.Payload["init"].(bool) {
|
||||
answer = &ZoneResponse{
|
||||
Type: "get_current_user_response",
|
||||
To: "",
|
||||
From: "",
|
||||
Payload: map[string]interface{}{
|
||||
"user": user,
|
||||
},
|
||||
}
|
||||
} else {
|
||||
answer = &ZoneResponse{
|
||||
Type: "get_user_response",
|
||||
To: "",
|
||||
From: "",
|
||||
Payload: map[string]interface{}{
|
||||
"user": user,
|
||||
},
|
||||
}
|
||||
}
|
||||
bs, err := json.Marshal(answer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Println(string(bs))
|
||||
if _, ok := zuh.DataChannels[req.From]; ok {
|
||||
if err = zuh.DataChannels[req.From].DataChannel.SendText(string(bs)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case LIST_ZONE_MEMBERS:
|
||||
users, err := zuh.DB.ListUsers(0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bs, err := json.Marshal(&ZoneResponse{
|
||||
Type: LIST_ZONE_MEMBERS_RESPONSE,
|
||||
To: "",
|
||||
From: "",
|
||||
Payload: map[string]interface{}{
|
||||
"users": users,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := zuh.DataChannels[req.From]; ok {
|
||||
if err = zuh.DataChannels[req.From].DataChannel.SendText(string(bs)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case MODIFY_USER_CHAT_RIGHTS:
|
||||
}
|
||||
return
|
||||
}
|
||||
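handleZoneRequest above dispatches on ReqType and only type-asserts payload fields after the verifyFields* guards have passed. A hedged sketch of the request shape the ADD_KNOWN_CHAT branch accepts (the function name is hypothetical; reqChan is assumed to be a channel returned by Subscribe, which the ZoneRequestScheduler fans back out to every registered handler):
// Hypothetical example, not part of this commit: the request shape that the
// ADD_KNOWN_CHAT case accepts once it reaches handleZoneRequest.
func publishAddKnownChat(reqChan chan<- *ZoneRequest, fromUserId, chatId, targetUserId string) {
	reqChan <- &ZoneRequest{
		ReqType: ADD_KNOWN_CHAT,
		From:    fromUserId,
		Payload: map[string]interface{}{
			// verifyFieldsString(req.Payload, "chatId", "userId") requires both
			// fields to be present and to be strings.
			"chatId": chatId,
			"userId": targetUserId,
		},
	}
}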
851
zoneVideoChannel.go
Normal file
851
zoneVideoChannel.go
Normal file
@ -0,0 +1,851 @@
|
||||
package localserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
sync "sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/pion/rtcp"
|
||||
"github.com/pion/webrtc/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
VIDEO_CHANNEL_ACCESS_DENIED ReqType = "video_channel_access_denied"
|
||||
VIDEO_CHANNEL_STOP_CALL ReqType = "video_channel_stop_call"
|
||||
VIDEO_CHANNEL_ACCESS_GRANTED ReqType = "video_channel_access_granted"
|
||||
VIDEO_CHANNEL_WEBRTC_OFFER ReqType = "video_channel_offer"
|
||||
VIDEO_CHANNEL_WEBRTC_ANSWER ReqType = "video_channel_answer"
|
||||
VIDEO_CHANNEL_WEBRTC_RENNEGOTIATION_OFFER ReqType = "video_channel_rennegotiation_offer"
|
||||
VIDEO_CHANNEL_WEBRTC_RENNEGOTIATION_ANSWER ReqType = "video_channel_rennegotiation_answer"
|
||||
VIDEO_CHANNEL_WEBRTC_COUNTER_OFFER ReqType = "video_channel_webrtc_counter_offer"
|
||||
VIDEO_CHANNEL_WEBRTC_CANDIDATE ReqType = "video_channel_webrtc_candidate"
|
||||
VIDEO_CHANNEL_REMOVE_VIDEO ReqType = "video_channel_remove_video"
|
||||
GET_VIDEO_CHANNEL_TRACKS ReqType = "video_channel_get_tracks"
|
||||
)
|
||||
|
||||
const (
|
||||
VIDEO_CHANNEL_USER_VIDEO_STOP = "video_channel_user_video_stop"
|
||||
VIDEO_CHANNEL_USER_VIDEO_RESUME = "video_channel_user_video_resume"
|
||||
VIDEO_CHANNEL_USER_MUTE = "video_channel_user_mute"
|
||||
VIDEO_CHANNEL_USER_UNMUTE = "video_channel_user_unmute"
|
||||
VIDEO_CHANNEL_USER_SPEAKING = "video_channel_user_speaking"
|
||||
VIDEO_CHANNEL_USER_STOPPED_SPEAKING = "video_channel_user_stopped_speaking"
|
||||
)
|
||||
|
||||
type VideoChannel struct {
|
||||
ID string `json:"id"`
|
||||
Owner string `json:"owner"`
|
||||
ChannelType string `json:"channelType"`
|
||||
Members []string `json:"members"`
|
||||
CurrentMembersId []string `json:"currentMembersId"`
|
||||
CurrentMembers map[string]*VideoChannelMember `json:"currentMembers"`
|
||||
localSD map[string]*webrtc.SessionDescription `json:"-"`
|
||||
rtcPeerConnections map[string]*ZoneRTCPeerConnection `json:"-"`
|
||||
audioTransceiver map[string][]*PeerSender `json:"-"`
|
||||
videoTransceiver map[string][]*PeerSender `json:"-"`
|
||||
videoChannelDataChannels map[string]*DataChannel `json:"-"`
|
||||
pendingCandidates map[string][]*webrtc.ICECandidate `json:"-"`
|
||||
remoteTracks map[string][]*RemoteTrack `json:"-"`
|
||||
middlewares []interface{} `json:"-"`
|
||||
candidateFlag *uint32 `json:"-"`
|
||||
remoteTracksFlag *uint32 `json:"-"`
|
||||
rtcPeerConnectionMapFlag *uint32 `json:"-"`
|
||||
dataChannelMapFlag *uint32 `json:"-"`
|
||||
localSDMapFlag *uint32 `json:"-"`
|
||||
audioSenderFlag *uint32 `json:"-"`
|
||||
videoSenderFlag *uint32 `json:"-"`
|
||||
}
|
||||
|
||||
type VideoChannelOnICECandidateFunc = func(string, string, *webrtc.ICECandidate) error
|
||||
|
||||
func NewVideoChannel(id string, owner string, channelType string, members []string, currentMembersId []string, currentMembers map[string]*VideoChannelMember) (videoChannel *VideoChannel) {
|
||||
candidateFlag := uint32(0)
|
||||
remoteTracksFlag := uint32(0)
|
||||
rtcPeerConnectionMapFlag := uint32(0)
|
||||
dataChannelMapFlag := uint32(0)
|
||||
localSDMapFlag := uint32(0)
|
||||
audioSenderFlag := uint32(0)
|
||||
videoSenderFlag := uint32(0)
|
||||
videoChannel = &VideoChannel{
|
||||
ID: id,
|
||||
Owner: owner,
|
||||
ChannelType: channelType,
|
||||
Members: members,
|
||||
CurrentMembersId: currentMembersId,
|
||||
CurrentMembers: currentMembers,
|
||||
localSD: make(map[string]*webrtc.SessionDescription),
|
||||
videoTransceiver: make(map[string][]*PeerSender),
|
||||
rtcPeerConnections: make(map[string]*ZoneRTCPeerConnection),
|
||||
audioTransceiver: make(map[string][]*PeerSender),
|
||||
videoChannelDataChannels: make(map[string]*DataChannel),
|
||||
pendingCandidates: make(map[string][]*webrtc.ICECandidate),
|
||||
remoteTracks: make(map[string][]*RemoteTrack),
|
||||
middlewares: make([]interface{}, 0),
|
||||
candidateFlag: &candidateFlag,
|
||||
remoteTracksFlag: &remoteTracksFlag,
|
||||
rtcPeerConnectionMapFlag: &rtcPeerConnectionMapFlag,
|
||||
dataChannelMapFlag: &dataChannelMapFlag,
|
||||
localSDMapFlag: &localSDMapFlag,
|
||||
audioSenderFlag: &audioSenderFlag,
|
||||
videoSenderFlag: &videoSenderFlag,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (vc *VideoChannel) HandleOffer(ctx context.Context, channelId string, userId string, sdp string, hostId string, sendDCMessage SendDCMessageFunc, cb VideoChannelOnICECandidateFunc) (done chan struct{}, errCh chan error) {
|
||||
done, errCh = make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
peerConnection, err := vc.createPeerConnection(userId, vc.ID, webrtc.SDPTypeAnswer, cb, sendDCMessage)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(vc.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
vc.rtcPeerConnections[userId] = &ZoneRTCPeerConnection{
|
||||
PeerConnection: peerConnection,
|
||||
makingOffer: false,
|
||||
makingOfferLock: &sync.Mutex{},
|
||||
negotiate: vc.negotiate,
|
||||
}
|
||||
return
|
||||
})
|
||||
offer := webrtc.SessionDescription{
|
||||
Type: webrtc.SDPTypeOffer,
|
||||
SDP: sdp,
|
||||
}
|
||||
if err = peerConnection.SetRemoteDescription(offer); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
rawAnswer, err := peerConnection.CreateAnswer(nil)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(vc.localSDMapFlag, func() (err error) {
|
||||
vc.localSD[userId] = &rawAnswer
|
||||
return
|
||||
})
|
||||
_, _ = sendDCMessage(string(VIDEO_CHANNEL_WEBRTC_ANSWER), hostId, userId, map[string]interface{}{
|
||||
"to": userId,
|
||||
"from": vc.ID,
|
||||
"channelId": channelId,
|
||||
"sdp": rawAnswer.SDP,
|
||||
})
|
||||
done <- struct{}{}
|
||||
logger.Println("handle offer done")
|
||||
}()
|
||||
return
|
||||
}
|
||||
|
||||
func (vc *VideoChannel) HandleCounterOffer(ctx context.Context, userId string, sendDCMessage SendDCMessageFunc) (err error) {
|
||||
if err = atomicallyExecute(vc.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := vc.rtcPeerConnections[userId]; !ok {
|
||||
err = fmt.Errorf("no field corresponding peer connection for id %s", userId)
|
||||
return
|
||||
}
|
||||
logger.Println("handling counter offer")
|
||||
connection := vc.rtcPeerConnections[userId]
|
||||
err = atomicallyExecute(vc.localSDMapFlag, func() (err error) {
|
||||
err = connection.SetLocalDescription(*vc.localSD[userId])
|
||||
return
|
||||
})
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(vc.localSDMapFlag, func() (err error) {
|
||||
delete(vc.localSD, userId)
|
||||
return
|
||||
})
|
||||
if err = atomicallyExecute(vc.candidateFlag, func() (err error) {
|
||||
for _, candidate := range vc.pendingCandidates[userId] {
|
||||
logger.Println("sending candidate to", userId, candidate)
|
||||
d, e := sendDCMessage(string(VIDEO_CHANNEL_WEBRTC_CANDIDATE), "", userId, map[string]interface{}{
|
||||
"from": vc.ID,
|
||||
"to": userId,
|
||||
"candidate": candidate.ToJSON().Candidate,
|
||||
"sdpMid": *candidate.ToJSON().SDPMid,
|
||||
"sdpMlineIndex": strconv.Itoa(int(*candidate.ToJSON().SDPMLineIndex)),
|
||||
})
|
||||
select {
|
||||
case <-d:
|
||||
case err = <-e:
|
||||
return
|
||||
}
|
||||
}
|
||||
delete(vc.pendingCandidates, userId)
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (vc *VideoChannel) HandleRennegotiationOffer(from string, sdp string, sendDCMessage SendDCMessageFunc) (err error) {
|
||||
err = atomicallyExecute(vc.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := vc.rtcPeerConnections[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding peer connection for id %s", from)
|
||||
return
|
||||
}
|
||||
vc.rtcPeerConnections[from].makingOfferLock.Lock()
|
||||
if vc.rtcPeerConnections[from].makingOffer {
|
||||
vc.rtcPeerConnections[from].makingOfferLock.Unlock()
|
||||
return fmt.Errorf("already making an offer or state is stable")
|
||||
}
|
||||
vc.rtcPeerConnections[from].makingOfferLock.Unlock()
|
||||
if err = vc.rtcPeerConnections[from].SetRemoteDescription(webrtc.SessionDescription{SDP: sdp, Type: webrtc.SDPTypeOffer}); err != nil {
|
||||
return
|
||||
}
|
||||
localSd, err := vc.rtcPeerConnections[from].CreateAnswer(nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = vc.rtcPeerConnections[from].SetLocalDescription(localSd); err != nil {
|
||||
return
|
||||
}
|
||||
d, e := sendDCMessage(string(VIDEO_CHANNEL_WEBRTC_RENNEGOTIATION_ANSWER), vc.ID, from, map[string]interface{}{
|
||||
"from": vc.ID,
|
||||
"to": from,
|
||||
"sdp": localSd.SDP,
|
||||
})
|
||||
select {
|
||||
case <-d:
|
||||
case err = <-e:
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (vc *VideoChannel) HandleRennegotiationAnswer(from string, sdp string) (err error) {
|
||||
logger.Println("---------------------handling rennego answer")
|
||||
err = atomicallyExecute(vc.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
// vc.rtcPeerConnections[from].makingOfferLock.Lock()
|
||||
// if vc.rtcPeerConnections[from].makingOffer {
|
||||
// vc.rtcPeerConnections[from].makingOfferLock.Unlock()
|
||||
// return fmt.Errorf("already making an offer or state is stable")
|
||||
// }
|
||||
// vc.rtcPeerConnections[from].makingOfferLock.Unlock()
|
||||
// if _, ok := vc.rtcPeerConnections[from]; !ok {
|
||||
// err = fmt.Errorf("no corresponding peer connection for id %s", from)
|
||||
// return
|
||||
// }
|
||||
err = vc.rtcPeerConnections[from].SetRemoteDescription(webrtc.SessionDescription{SDP: sdp, Type: webrtc.SDPTypeAnswer})
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (vc *VideoChannel) AddCandidate(candidate *webrtc.ICECandidateInit, from string) (err error) {
|
||||
logger.Println("adding ice candidate", candidate)
|
||||
err = atomicallyExecute(vc.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := vc.rtcPeerConnections[from]; ok && candidate != nil {
|
||||
err = vc.rtcPeerConnections[from].AddICECandidate(*candidate)
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (vc *VideoChannel) createPeerConnection(target string, from string, peerType webrtc.SDPType, cb VideoChannelOnICECandidateFunc, sendDCMessage SendDCMessageFunc) (peerConnection *webrtc.PeerConnection, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logger.Printf("recover from panic : %v\n", r)
|
||||
}
|
||||
}()
|
||||
config := webrtc.Configuration{
|
||||
ICEServers: []webrtc.ICEServer{
|
||||
{
|
||||
URLs: []string{"stun:stun.l.google.com:19302", "stun:stunserver.org:3478"},
|
||||
},
|
||||
},
|
||||
SDPSemantics: webrtc.SDPSemanticsUnifiedPlanWithFallback,
|
||||
}
|
||||
|
||||
peerConnection, err = webrtc.NewPeerConnection(config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
logger.Println("---------------------------------------------------")
|
||||
if peerType == webrtc.SDPTypeAnswer {
|
||||
maxRetransmits := uint16(100)
|
||||
channel, err := peerConnection.CreateDataChannel("video-channel", &webrtc.DataChannelInit{
|
||||
MaxRetransmits: &maxRetransmits,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
channel.OnOpen(func() {
|
||||
logger.Println("channel opened")
|
||||
if chanErr := channel.SendText("video-channel data channel open"); chanErr != nil {
|
||||
logger.Println(chanErr)
|
||||
}
|
||||
})
|
||||
channel.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
var event CallEvent
|
||||
if err := json.Unmarshal(msg.Data, &event); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if e := vc.HandleDataChannelEvents(event.From, event.EventId, event.Payload); e != nil {
|
||||
logger.Println("*-------------- datachannel error: ", e)
|
||||
}
|
||||
})
|
||||
logger.Println("new channel for target : ", target)
|
||||
_ = atomicallyExecute(vc.dataChannelMapFlag, func() (err error) {
|
||||
logger.Println(target)
|
||||
l := int32(0)
|
||||
vc.videoChannelDataChannels[target] = &DataChannel{
|
||||
DataChannel: channel,
|
||||
l: &l,
|
||||
}
|
||||
return
|
||||
})
|
||||
} else {
|
||||
peerConnection.OnDataChannel(func(dc *webrtc.DataChannel) {
|
||||
_ = atomicallyExecute(vc.dataChannelMapFlag, func() (err error) {
|
||||
l := int32(0)
|
||||
vc.videoChannelDataChannels[target] = &DataChannel{
|
||||
DataChannel: dc,
|
||||
l: &l,
|
||||
}
|
||||
return
|
||||
})
|
||||
dc.OnOpen(func() {
|
||||
logger.Printf("got a new open datachannel %s\n", dc.Label())
|
||||
})
|
||||
dc.OnMessage(func(msg webrtc.DataChannelMessage) {
|
||||
var event CallEvent
|
||||
if err := json.Unmarshal(msg.Data, &event); err != nil {
|
||||
logger.Println(err)
|
||||
return
|
||||
}
|
||||
if e := vc.HandleDataChannelEvents(event.From, event.EventId, event.Payload); e != nil {
|
||||
logger.Println("*-------------- datachannel error: ", e)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
err = atomicallyExecute(vc.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
err = atomicallyExecute(vc.remoteTracksFlag, func() (err error) {
|
||||
logger.Println("------------------", vc.CurrentMembersId)
|
||||
for _, id := range vc.CurrentMembersId {
|
||||
logger.Println(id)
|
||||
if id != target {
|
||||
if _, ok := vc.remoteTracks[id]; !ok {
|
||||
continue
|
||||
}
|
||||
for _, track := range vc.remoteTracks[id] {
|
||||
transceiver, err := peerConnection.AddTransceiverFromKind(track.Track.Kind(), webrtc.RTPTransceiverInit{Direction: webrtc.RTPTransceiverDirectionSendonly})
|
||||
if err != nil {
|
||||
logger.Println("add track error")
|
||||
continue
|
||||
}
|
||||
if err := transceiver.Sender().ReplaceTrack(track.Track); err != nil {
|
||||
logger.Println("add track error")
|
||||
continue
|
||||
}
|
||||
if track.Track.Kind() == webrtc.RTPCodecTypeVideo {
|
||||
_ = atomicallyExecute(vc.videoSenderFlag, func() (err error) {
|
||||
if len(vc.videoTransceiver) == 0 {
|
||||
vc.videoTransceiver[id] = []*PeerSender{{ID: target, Transceiver: transceiver}}
|
||||
} else {
|
||||
vc.videoTransceiver[id] = append(vc.videoTransceiver[id], &PeerSender{ID: target, Transceiver: transceiver})
|
||||
}
|
||||
return
|
||||
})
|
||||
} else if track.Track.Kind() == webrtc.RTPCodecTypeAudio {
|
||||
_ = atomicallyExecute(vc.audioSenderFlag, func() (err error) {
|
||||
if len(vc.audioTransceiver) == 0 {
|
||||
vc.audioTransceiver[id] = []*PeerSender{{ID: target, Transceiver: transceiver}}
|
||||
} else {
|
||||
vc.audioTransceiver[id] = append(vc.audioTransceiver[id], &PeerSender{ID: target, Transceiver: transceiver})
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
||||
logger.Println("track added", track)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
return
|
||||
})
|
||||
peerConnection.OnConnectionStateChange(func(pcs webrtc.PeerConnectionState) {
|
||||
if pcs == webrtc.PeerConnectionStateClosed || pcs == webrtc.PeerConnectionStateDisconnected || pcs == webrtc.PeerConnectionStateFailed {
|
||||
logger.Println(pcs)
|
||||
//vc.HandleLeavingMember(target, squadId)
|
||||
}
|
||||
})
|
||||
peerConnection.OnICEConnectionStateChange(func(is webrtc.ICEConnectionState) {
|
||||
logger.Printf("ICE connection state has changed %s\n", is.String())
|
||||
if is == webrtc.ICEConnectionStateDisconnected || is == webrtc.ICEConnectionStateFailed {
|
||||
logger.Println(is)
|
||||
}
|
||||
})
|
||||
peerConnection.OnTrack(func(tr *webrtc.TrackRemote, r *webrtc.RTPReceiver) {
|
||||
logger.Println("got new track")
|
||||
defer func() {
|
||||
if stopErr := r.Stop(); stopErr != nil {
|
||||
logger.Println(stopErr)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
ticker := time.NewTicker(1500 * time.Millisecond)
|
||||
for range ticker.C {
|
||||
if rtcpSendErr := peerConnection.WriteRTCP([]rtcp.Packet{&rtcp.PictureLossIndication{MediaSSRC: uint32(tr.SSRC())}}); rtcpSendErr != nil {
|
||||
logger.Println(rtcpSendErr)
|
||||
break
|
||||
}
|
||||
}
|
||||
}()
|
||||
uniqId := uuid.New()
|
||||
i := fmt.Sprintf("%s/%s", target, uniqId.String())
|
||||
logger.Println("*************************----------------", i, "-----------------------***************")
|
||||
localTrack, newTrackErr := webrtc.NewTrackLocalStaticRTP(tr.Codec().RTPCodecCapability, i, i)
|
||||
if newTrackErr != nil {
|
||||
return
|
||||
}
|
||||
logger.Println(localTrack)
|
||||
rtpbuf := make([]byte, 1400)
|
||||
flag := int32(0)
|
||||
remote := &RemoteTrack{ID: target, Track: localTrack, rdv: &flag}
|
||||
_ = atomicallyExecute(vc.remoteTracksFlag, func() (err error) {
|
||||
if len(vc.remoteTracks[target]) == 0 {
|
||||
vc.remoteTracks[target] = []*RemoteTrack{remote}
|
||||
} else {
|
||||
vc.remoteTracks[target] = append(vc.remoteTracks[target], remote)
|
||||
}
|
||||
index := len(vc.remoteTracks[target])
|
||||
logger.Println(index, vc.remoteTracks)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(vc.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
for _, id := range vc.CurrentMembersId {
|
||||
if id != target {
|
||||
if _, ok := vc.rtcPeerConnections[id]; !ok {
|
||||
continue
|
||||
}
|
||||
connection := vc.rtcPeerConnections[id]
|
||||
transceiver, tranceiverErr := connection.AddTransceiverFromKind(localTrack.Kind(), webrtc.RTPTransceiverInit{Direction: webrtc.RTPTransceiverDirectionSendonly})
|
||||
if tranceiverErr != nil {
|
||||
logger.Println(tranceiverErr)
|
||||
continue
|
||||
}
|
||||
if replaceTrackErr := transceiver.Sender().ReplaceTrack(localTrack); replaceTrackErr != nil {
|
||||
logger.Println(replaceTrackErr)
|
||||
continue
|
||||
}
|
||||
// go func() {
|
||||
// rtcpBuf := make([]byte, 1500)
|
||||
// for {
|
||||
// if _, _, rtcpErr := transceiver.Sender().Read(rtcpBuf); rtcpErr != nil {
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
// }()
|
||||
if localTrack.Kind() == webrtc.RTPCodecTypeAudio {
|
||||
_ = atomicallyExecute(vc.audioSenderFlag, func() (err error) {
|
||||
if len(vc.audioTransceiver) == 0 {
|
||||
vc.audioTransceiver[target] = []*PeerSender{{ID: id, Transceiver: transceiver}}
|
||||
} else {
|
||||
vc.audioTransceiver[target] = append(vc.audioTransceiver[target], &PeerSender{ID: id, Transceiver: transceiver})
|
||||
}
|
||||
return
|
||||
})
|
||||
} else if localTrack.Kind() == webrtc.RTPCodecTypeVideo {
|
||||
_ = atomicallyExecute(vc.videoSenderFlag, func() (err error) {
|
||||
if len(vc.videoTransceiver) == 0 {
|
||||
vc.videoTransceiver[target] = []*PeerSender{{ID: id, Transceiver: transceiver}}
|
||||
} else {
|
||||
vc.videoTransceiver[target] = append(vc.videoTransceiver[target], &PeerSender{ID: id, Transceiver: transceiver})
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
||||
go func() {
|
||||
<-time.After(time.Millisecond * 500)
|
||||
connection.negotiate(id, sendDCMessage)
|
||||
}()
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
d := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
i, _, readErr := tr.Read(rtpbuf)
|
||||
if readErr != nil {
|
||||
logger.Println(readErr)
|
||||
break
|
||||
}
|
||||
f := atomic.LoadInt32(remote.rdv)
|
||||
if f == 0 {
|
||||
if _, writeErr := localTrack.Write(rtpbuf[:i]); writeErr != nil && !errors.Is(writeErr, io.ErrClosedPipe) {
|
||||
logger.Println(writeErr)
|
||||
break
|
||||
} else {
|
||||
_ = rtpbuf[:i]
|
||||
}
|
||||
}
|
||||
}
|
||||
d <- struct{}{}
|
||||
}()
|
||||
<-d
|
||||
})
|
||||
peerConnection.OnICECandidate(func(i *webrtc.ICECandidate) {
|
||||
if i == nil {
|
||||
return
|
||||
}
|
||||
_ = atomicallyExecute(vc.candidateFlag, func() (err error) {
|
||||
desc := peerConnection.RemoteDescription()
|
||||
if desc == nil {
|
||||
logger.Println("generated candidate appended to list : ", i)
|
||||
vc.pendingCandidates[target] = append(vc.pendingCandidates[target], i)
|
||||
} else {
|
||||
logger.Println("generated candidate : ", i)
|
||||
if iceCandidateErr := cb(from, target, i); iceCandidateErr != nil {
|
||||
logger.Println(iceCandidateErr)
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
})
|
||||
peerConnection.OnNegotiationNeeded(func() {
|
||||
logger.Println("---------------- rennego is needed -----------")
|
||||
// _ = atomicallyExecute(vc.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
// for _, id := range vc.CurrentMembersId {
|
||||
// logger.Println("----------------- sending renego to peer with id", id)
|
||||
// if _, ok := vc.rtcPeerConnections[id]; !ok {
|
||||
// continue
|
||||
// }
|
||||
// if peerConnection.SignalingState() == webrtc.SignalingStateStable {
|
||||
// localSd, localSdErr := peerConnection.CreateOffer(nil)
|
||||
// if localSdErr != nil {
|
||||
// logger.Println(localSdErr)
|
||||
// return localSdErr
|
||||
// }
|
||||
// if err = peerConnection.SetLocalDescription(localSd); err != nil {
|
||||
// logger.Println(err)
|
||||
// return
|
||||
// }
|
||||
// d, e := sendDCMessage(string(VIDEO_CHANNEL_WEBRTC_RENNEGOTIATION_OFFER), vc.ID, id, map[string]interface{}{
|
||||
// "from": vc.ID,
|
||||
// "to": id,
|
||||
// "sdp": localSd.SDP,
|
||||
// })
|
||||
// select {
|
||||
// case <-d:
|
||||
// case err = <-e:
|
||||
// logger.Println(err)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// return
|
||||
// })
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (vc *VideoChannel) HandleLeavingMember(id string) {
|
||||
if err := atomicallyExecute(vc.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := vc.rtcPeerConnections[id]; !ok {
|
||||
err = fmt.Errorf("no corresponding peerconnection for audio channel leaving member")
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
logger.Println(err)
|
||||
} else {
|
||||
defer func() {
|
||||
_ = atomicallyExecute(vc.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := vc.rtcPeerConnections[id]; ok {
|
||||
if closeErr := vc.rtcPeerConnections[id].Close(); closeErr != nil {
|
||||
err = closeErr
|
||||
logger.Println("peer connection close error", closeErr)
|
||||
}
|
||||
}
|
||||
delete(vc.rtcPeerConnections, id)
|
||||
return
|
||||
})
|
||||
}()
|
||||
}
|
||||
logger.Printf("peer %s is leaving the squad\n", id)
|
||||
_ = atomicallyExecute(vc.dataChannelMapFlag, func() (err error) {
|
||||
if _, ok := vc.videoChannelDataChannels[id]; ok {
|
||||
vc.videoChannelDataChannels[id].DataChannel.Close()
|
||||
}
|
||||
delete(vc.videoChannelDataChannels, id)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(vc.localSDMapFlag, func() (err error) {
|
||||
delete(vc.localSD, id)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(vc.candidateFlag, func() (err error) {
|
||||
delete(vc.pendingCandidates, id)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(vc.audioSenderFlag, func() (err error) {
|
||||
for peerId, peerSender := range vc.audioTransceiver {
|
||||
if peerId != id {
|
||||
logger.Println("senders", peerSender)
|
||||
c := 0
|
||||
for i, sender := range peerSender {
|
||||
if sender.ID == id {
|
||||
if senderErr := sender.Transceiver.Sender().Stop(); senderErr != nil {
|
||||
logger.Println(senderErr)
|
||||
}
|
||||
if transceiverErr := sender.Transceiver.Stop(); transceiverErr != nil {
|
||||
logger.Println("transceiverErr occured with video", transceiverErr)
|
||||
}
|
||||
peerSender[len(peerSender)-i-1], peerSender[i] = peerSender[i], peerSender[len(peerSender)-i-1]
|
||||
c++
|
||||
}
|
||||
}
|
||||
vc.audioTransceiver[peerId] = vc.audioTransceiver[peerId][:len(peerSender)-(c)]
|
||||
logger.Println(vc.audioTransceiver[peerId])
|
||||
}
|
||||
}
|
||||
for _, transceiver := range vc.audioTransceiver[id] {
|
||||
if senderErr := transceiver.Transceiver.Sender().Stop(); senderErr != nil {
|
||||
logger.Println(senderErr)
|
||||
}
|
||||
if stopErr := transceiver.Transceiver.Stop(); stopErr != nil {
|
||||
logger.Println("transceiver audio stop error", stopErr)
|
||||
}
|
||||
}
|
||||
delete(vc.audioTransceiver, id)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(vc.videoSenderFlag, func() (err error) {
|
||||
for peerId, peerSender := range vc.videoTransceiver {
|
||||
if peerId != id {
|
||||
logger.Println("senders", peerSender)
|
||||
c := 0
|
||||
for i, sender := range peerSender {
|
||||
if sender.ID == id {
|
||||
if senderErr := sender.Transceiver.Sender().Stop(); senderErr != nil {
|
||||
logger.Println(senderErr)
|
||||
}
|
||||
if transceiverErr := sender.Transceiver.Stop(); transceiverErr != nil {
|
||||
logger.Println("transceiverErr occured with video", transceiverErr)
|
||||
}
|
||||
peerSender[len(peerSender)-i-1], peerSender[i] = peerSender[i], peerSender[len(peerSender)-i-1]
|
||||
c++
|
||||
}
|
||||
}
|
||||
vc.videoTransceiver[peerId] = vc.videoTransceiver[peerId][:len(peerSender)-(c)]
|
||||
logger.Println(vc.videoTransceiver[peerId])
|
||||
}
|
||||
}
|
||||
for _, transceiver := range vc.videoTransceiver[id] {
|
||||
if senderErr := transceiver.Transceiver.Sender().Stop(); senderErr != nil {
|
||||
logger.Println(senderErr)
|
||||
}
|
||||
if stopErr := transceiver.Transceiver.Stop(); stopErr != nil {
|
||||
logger.Println("transceiver video stop error", stopErr)
|
||||
}
|
||||
}
|
||||
delete(vc.videoTransceiver, id)
|
||||
return
|
||||
})
|
||||
_ = atomicallyExecute(vc.remoteTracksFlag, func() (err error) {
|
||||
delete(vc.remoteTracks, id)
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
func (vc *VideoChannel) negotiate(target string, sendDCMessage SendDCMessageFunc) {
|
||||
logger.Println("------------------negotiate is called")
|
||||
_ = atomicallyExecute(vc.rtcPeerConnectionMapFlag, func() (err error) {
|
||||
if _, ok := vc.rtcPeerConnections[target]; !ok {
|
||||
return
|
||||
}
|
||||
vc.rtcPeerConnections[target].makingOfferLock.Lock()
|
||||
vc.rtcPeerConnections[target].makingOffer = true
|
||||
vc.rtcPeerConnections[target].makingOfferLock.Unlock()
|
||||
defer func() {
|
||||
vc.rtcPeerConnections[target].makingOfferLock.Lock()
|
||||
vc.rtcPeerConnections[target].makingOffer = false
|
||||
vc.rtcPeerConnections[target].makingOfferLock.Unlock()
|
||||
}()
|
||||
|
||||
for _, id := range vc.CurrentMembersId {
|
||||
logger.Println("----------------- sending renego to peer with id", id)
|
||||
if _, ok := vc.rtcPeerConnections[id]; !ok {
|
||||
continue
|
||||
}
|
||||
connection := vc.rtcPeerConnections[id]
|
||||
if connection.SignalingState() == webrtc.SignalingStateStable {
|
||||
localSd, err := connection.CreateOffer(nil)
|
||||
if err != nil {
|
||||
logger.Println(err)
|
||||
return err
|
||||
}
|
||||
if err = connection.SetLocalDescription(localSd); err != nil {
|
||||
logger.Println(err)
|
||||
return err
|
||||
}
|
||||
d, e := sendDCMessage(string(VIDEO_CHANNEL_WEBRTC_RENNEGOTIATION_OFFER), vc.ID, id, map[string]interface{}{
|
||||
"from": vc.ID,
|
||||
"to": id,
|
||||
"sdp": localSd.SDP,
|
||||
})
|
||||
select {
|
||||
case <-d:
|
||||
case err = <-e:
|
||||
logger.Println(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
func (vc *VideoChannel) broadcastDatachannelMessage(from string, eventId string, payload map[string]interface{}) (done chan struct{}, errCh chan error) {
|
||||
done, errCh = make(chan struct{}), make(chan error)
|
||||
go func() {
|
||||
bs, jsonErr := json.Marshal(&ZoneResponse{
|
||||
Type: eventId,
|
||||
From: vc.ID,
|
||||
Payload: payload,
|
||||
})
|
||||
if jsonErr != nil {
|
||||
errCh <- jsonErr
|
||||
return
|
||||
}
|
||||
if err := atomicallyExecute(vc.dataChannelMapFlag, func() (err error) {
|
||||
for id, dc := range vc.videoChannelDataChannels {
|
||||
if from != id {
|
||||
if err = dc.DataChannel.SendText(string(bs)); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
errCh <- err
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
return
|
||||
}
|
||||
|
||||
func (vc *VideoChannel) HandleDataChannelEvents(from string, eventId string, payload map[string]interface{}) (err error) {
|
||||
switch eventId {
|
||||
case VIDEO_CHANNEL_USER_VIDEO_STOP:
|
||||
if err = atomicallyExecute(vc.remoteTracksFlag, func() (err error) {
|
||||
if _, ok := vc.remoteTracks[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding remote tracks entry for id %s", from)
|
||||
return
|
||||
}
|
||||
for _, track := range vc.remoteTracks[from] {
|
||||
if track.Track.Kind() == webrtc.RTPCodecTypeVideo {
|
||||
atomic.SwapInt32(track.rdv, 1)
|
||||
}
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
done, errCh := vc.broadcastDatachannelMessage(from, VIDEO_CHANNEL_USER_VIDEO_STOP, map[string]interface{}{
|
||||
"userId": from,
|
||||
})
|
||||
select {
|
||||
case <-done:
|
||||
case err = <-errCh:
|
||||
}
|
||||
case VIDEO_CHANNEL_USER_VIDEO_RESUME:
|
||||
if err = atomicallyExecute(vc.remoteTracksFlag, func() (err error) {
|
||||
if _, ok := vc.remoteTracks[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding remote tracks entry for id %s", from)
|
||||
return
|
||||
}
|
||||
for _, track := range vc.remoteTracks[from] {
|
||||
if track.Track.Kind() == webrtc.RTPCodecTypeVideo {
|
||||
atomic.SwapInt32(track.rdv, 0)
|
||||
}
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
done, errCh := vc.broadcastDatachannelMessage(from, VIDEO_CHANNEL_USER_VIDEO_RESUME, map[string]interface{}{
|
||||
"userId": from,
|
||||
})
|
||||
select {
|
||||
case <-done:
|
||||
case err = <-errCh:
|
||||
}
|
||||
case VIDEO_CHANNEL_USER_MUTE:
|
||||
if err = atomicallyExecute(vc.remoteTracksFlag, func() (err error) {
|
||||
if _, ok := vc.remoteTracks[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding remote tracks entry for id %s", from)
|
||||
return
|
||||
}
|
||||
for _, track := range vc.remoteTracks[from] {
|
||||
if track.Track.Kind() == webrtc.RTPCodecTypeAudio {
|
||||
atomic.SwapInt32(track.rdv, 1)
|
||||
}
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
done, errCh := vc.broadcastDatachannelMessage(from, VIDEO_CHANNEL_USER_MUTE, map[string]interface{}{
|
||||
"userId": from,
|
||||
})
|
||||
select {
|
||||
case <-done:
|
||||
case err = <-errCh:
|
||||
}
|
||||
case VIDEO_CHANNEL_USER_UNMUTE:
|
||||
if err = atomicallyExecute(vc.remoteTracksFlag, func() (err error) {
|
||||
if _, ok := vc.remoteTracks[from]; !ok {
|
||||
err = fmt.Errorf("no corresponding remote tracks entry for id %s", from)
|
||||
return
|
||||
}
|
||||
for _, track := range vc.remoteTracks[from] {
|
||||
if track.Track.Kind() == webrtc.RTPCodecTypeAudio {
|
||||
atomic.SwapInt32(track.rdv, 0)
|
||||
}
|
||||
}
|
||||
return
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
done, errCh := vc.broadcastDatachannelMessage(from, VIDEO_CHANNEL_USER_UNMUTE, map[string]interface{}{
|
||||
"userId": from,
|
||||
})
|
||||
select {
|
||||
case <-done:
|
||||
case err = <-errCh:
|
||||
}
|
||||
case VIDEO_CHANNEL_USER_SPEAKING:
|
||||
done, errCh := vc.broadcastDatachannelMessage(from, VIDEO_CHANNEL_USER_SPEAKING, map[string]interface{}{
|
||||
"userId": from,
|
||||
})
|
||||
select {
|
||||
case <-done:
|
||||
case err = <-errCh:
|
||||
}
|
||||
case VIDEO_CHANNEL_USER_STOPPED_SPEAKING:
|
||||
done, errCh := vc.broadcastDatachannelMessage(from, VIDEO_CHANNEL_USER_STOPPED_SPEAKING, map[string]interface{}{
|
||||
"userId": from,
|
||||
})
|
||||
select {
|
||||
case <-done:
|
||||
case err = <-errCh:
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
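HandleOffer above runs asynchronously and signals completion on a done/error channel pair. A hedged caller sketch in the same package (the function name is hypothetical; sendDCMessage and onCandidate are assumed to come from the surrounding signalling layer, see SendDCMessageFunc and VideoChannelOnICECandidateFunc):
// Hypothetical caller, not part of this commit: waits for HandleOffer to finish
// or fail, and gives up if the context is cancelled first.
func answerVideoOffer(ctx context.Context, vc *VideoChannel, channelId, userId, sdp, hostId string,
	sendDCMessage SendDCMessageFunc, onCandidate VideoChannelOnICECandidateFunc) error {
	done, errCh := vc.HandleOffer(ctx, channelId, userId, sdp, hostId, sendDCMessage, onCandidate)
	select {
	case <-done:
		return nil
	case err := <-errCh:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}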
1038
zoneVideoChannelsHandler.go
Normal file
1038
zoneVideoChannelsHandler.go
Normal file
File diff suppressed because it is too large