A Survey of Wangle, Facebook's C++ Networking Library
A Survey of Wangle
chenxiaojie@qiyi.com
2016.07.16
Content
• Example
• Features
• Architecture
• Summary
Example
Server
ServerBootstrap<SerializePipeline> server;
// server.pipeline(std::make_shared<RoutingDataPipelineFactory>());
server.childPipeline(std::make_shared<RpcPipelineFactory>());
// server.group(IOThreadPoolExecutor(), IOThreadPoolExecutor());
// server.acceptorConfig(…);
server.bind(6666);
server.waitForStop();
Server Bootstrap
Pipeline
class RpcPipelineFactory : public … {
 public:
  SerializePipelinePtr newPipeline(AsyncTransport sock) {
    auto pipeline = SerializePipeline::create();
    pipeline->addBack(AsyncSocketHandler(sock));
    pipeline->addBack(EventBaseHandler());
    pipeline->addBack(LengthFieldBasedFrameDecoder());
    pipeline->addBack(LengthFieldPrepender());
    pipeline->addBack(ServerSerializeHandler());
    pipeline->addBack(MultiplexServerDispatcher<Bonk, Xtruct>(&service_));
    pipeline->finalize();
    return pipeline;
  }

 private:
  RpcService<Bonk, Xtruct> service_;
};
Pipeline
[Diagram] Event flow through the server pipeline: AsyncSocketHandler → EventBaseHandler → LengthFieldBasedFrameDecoder → LengthFieldPrepender → ServerSerializeHandler → MultiplexServerDispatcher → Service
RPC Protocol
class ServerSerializeHandler : public … {
 public:
  void read(Context* ctx, IOBuf msg) override {
    Bonk received;
    ser.deserialize<Bonk>(msg, &received);
    ctx->fireRead(received);
  }

  Future<Unit> write(Context* ctx, Xtruct b) override {
    string out;
    ser.serialize<Xtruct>(b, &out);
    return ctx->fireWrite(out);
  }

 private:
  ThriftSerializerCompact<> ser;
};
RPC Handler
class RpcService : public Service<Bonk, Xtruct> {
 public:
  Future<Xtruct> operator()(Bonk request) override {
    return makeFuture()
        .via(getIOExecutor())
        .then(getCouchbaseResult())
        .via(getCpuExecutor())
        .then(calculate1())
        .via(getIOExecutor())
        .then(getCtrServerResult())
        .then(calculate2())
        .then(getRedisResult())
        .then(calculate3())
        .then(sendResponse());
  }
};
Client
ClientBootstrap<SerializePipeline> client;
client.group(std::make_shared<IOThreadPoolExecutor>(1));
client.pipelineFactory(std::make_shared<RpcPipelineFactory>());
auto pipeline = client.connect(address).get();

BonkMultiplexClientDispatcher service;
service.setPipeline(pipeline);

while (true) {
  Bonk request;
  service(request).then([](Xtruct response) {
    cout << response.string_thing << endl;
  });
}
Pipeline
class RpcPipelineFactory : public … {
 public:
  SerializePipelinePtr newPipeline(AsyncTransport sock) override {
    auto pipeline = SerializePipeline::create();
    pipeline->addBack(AsyncSocketHandler(sock));
    pipeline->addBack(EventBaseHandler());
    pipeline->addBack(LengthFieldBasedFrameDecoder());
    pipeline->addBack(LengthFieldPrepender());
    pipeline->addBack(ClientSerializeHandler());
    pipeline->finalize();
    return pipeline;
  }
};
Pipeline
[Diagram] Event flow through the client pipeline, driven by an EventBase: AsyncSocketHandler → EventBaseHandler → LengthFieldBasedFrameDecoder → LengthFieldPrepender → ClientSerializeHandler → Service
RPC Protocol
class ClientSerializeHandler : public … {
 public:
  void read(Context* ctx, IOBuf msg) override {
    Xtruct received;
    ser.deserialize<Xtruct>(msg, &received);
    ctx->fireRead(received);
  }

  Future<Unit> write(Context* ctx, Bonk b) override {
    string out;
    ser.serialize<Bonk>(b, &out);
    return ctx->fireWrite(out);
  }

 private:
  ThriftSerializerCompact<> ser;
};
RPC Handler
class BonkMultiplexClientDispatcher : public … {
 public:
  void read(Context* ctx, Xtruct in) override {
    auto search = requests_.find(in.i32_thing);
    auto p = move(search->second);
    requests_.erase(in.i32_thing);
    p.setValue(in);
  }

  Future<Xtruct> operator()(Bonk arg) override {
    auto& p = requests_[arg.type];
    auto f = p.getFuture();
    this->pipeline_->write(arg);
    return f;
  }

 private:
  unordered_map<int32_t, Promise<Xtruct>> requests_;
};
Features
Executor
• IOThreadPoolExecutor (usage sketch below):
  • for IO-intensive work
  • one event loop and one notification queue (eventfd) per thread
  • thread selection: round robin or user-defined
  • MPSC
• CPUThreadPoolExecutor:
  • for CPU-intensive work
  • multiple threads, a single queue
  • MPMC queue; LIFO wakeup for cache efficiency
• ThreadedExecutor
• FiberIOExecutor (stackful)
• FutureExecutor
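A usage sketch of the two pool types, using the wangle/concurrent headers of this talk's era (the executors later moved into folly); Result, fetchFromNetwork, and crunch are hypothetical stand-ins for IO-bound and CPU-bound work:

#include <folly/futures/Future.h>
#include <memory>
#include <wangle/concurrent/CPUThreadPoolExecutor.h>
#include <wangle/concurrent/IOThreadPoolExecutor.h>

struct Result {};          // hypothetical payload
Result fetchFromNetwork(); // hypothetical IO-bound call
int crunch(Result);        // hypothetical CPU-bound call

void example() {
  auto ioPool = std::make_shared<wangle::IOThreadPoolExecutor>(4);   // 4 event loops
  auto cpuPool = std::make_shared<wangle::CPUThreadPoolExecutor>(8); // 8 workers, one MPMC queue

  folly::makeFuture()
      .via(ioPool.get())
      .then([] { return fetchFromNetwork(); })   // IO work on the IO pool
      .via(cpuPool.get())
      .then([](Result r) { return crunch(r); }); // CPU work on the CPU pool
}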
MPMC Queue
• LIFO Semaphore:
  • approximately LIFO
  • multi-post, exact wakeups, fewer system calls than sem_t
• MPMC Queue (usage sketch below):
  • fixed capacity; blocks or throws when full
  • incremental ticket based; item index = hash(ticket)
  • if the slot still holds an item, wait for your turn; otherwise write the element
  • read tickets follow the same growing rule
  • the ticket's turn selects the right version of the element
  • false sharing:
    • advance by a stride of slots per ticket
• futex:
  • Fast Userspace muTEX: stays in userspace when uncontended, enters the kernel only to wait or wake
• tbb::concurrent_bounded_queue
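A minimal producer/consumer sketch against folly::MPMCQueue (folly/MPMCQueue.h); the capacity and counts here are arbitrary:

#include <folly/MPMCQueue.h>
#include <cstdio>
#include <thread>

int main() {
  folly::MPMCQueue<int> queue(1024); // fixed capacity, ticket based

  std::thread producer([&] {
    for (int i = 0; i < 100; ++i) {
      queue.blockingWrite(i); // blocks while the queue is full
    }
  });
  std::thread consumer([&] {
    for (int i = 0; i < 100; ++i) {
      int item;
      queue.blockingRead(item); // blocks until an item is available
      std::printf("%d\n", item);
    }
  });
  producer.join();
  consumer.join();
}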
Service
• Client Dispatchers:
  • SerialClientDispatcher: one request is allowed at a time
  • PipelinedClientDispatcher: keeps a queue of promises for pipelining
• Server Dispatchers:
  • SerialServerDispatcher: one at a time, synchronously
  • PipelinedServerDispatcher: responses are queued with a request id until they can be sent in order
  • MultiplexServerDispatcher: dispatches requests as they come in
• Common Filters:
  • ExpiringFilter: expires the service after a certain amount of time
  • ExecutorFilter: runs calls through an executor (see the sketch below)
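For illustration, the example's RpcService wrapped in an ExecutorFilter so that calls run on a CPU pool instead of inline on the IO thread; a sketch based on wangle/service/ExecutorFilter.h, reusing Bonk, Xtruct, and RpcService from the example:

#include <memory>
#include <wangle/concurrent/CPUThreadPoolExecutor.h>
#include <wangle/service/ExecutorFilter.h>

// Dispatch every call to a CPU pool instead of running it inline
// on the IO thread.
auto cpuPool = std::make_shared<wangle::CPUThreadPoolExecutor>(10);
auto service = std::make_shared<wangle::ExecutorFilter<Bonk, Xtruct>>(
    cpuPool, std::make_shared<RpcService>());

// (*service)(request) now returns a Future<Xtruct> that is fulfilled
// on one of the pool's threads.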
Overload Protection
• CoDel (Controlled Delay) (toy sketch below):
  • an active queue management algorithm, originally designed to attack bufferbloat
  • if every request during the past interval (100ms) has experienced queuing delay greater than the target (5ms), shed load
  • slough off requests that have exceeded an alternate timeout (2 × target_delay)
  • back to normal once delay drops to 5ms
  • application case: RabbitMQ
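A toy, self-contained sketch of that rule, assuming C++17; this is not folly's implementation, just the 5ms/100ms logic spelled out:

#include <algorithm>
#include <chrono>

// Toy sketch of the CoDel rule above. Shed load while the *minimum*
// queueing delay over the last interval stayed above the target,
// i.e. every request during the interval was slow.
class MiniCodel {
  using Clock = std::chrono::steady_clock;
  using Ms = std::chrono::milliseconds;
  static constexpr Ms kTarget{5};     // target delay
  static constexpr Ms kInterval{100}; // observation interval

  Clock::time_point intervalStart_ = Clock::now();
  Ms minDelay_ = Ms::max();
  bool overloaded_ = false;

 public:
  // Called once per request with its measured queueing delay;
  // returns true if the request should be shed.
  bool shouldShed(Ms queueDelay) {
    minDelay_ = std::min(minDelay_, queueDelay);
    if (Clock::now() - intervalStart_ >= kInterval) {
      overloaded_ = minDelay_ > kTarget; // back to normal once delay <= target
      minDelay_ = Ms::max();
      intervalStart_ = Clock::now();
    }
    return overloaded_ && queueDelay > 2 * kTarget; // alternate timeout
  }
};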
Connection Routing
class NaiveRoutingDataHandler : public … {
 public:
  bool parseRoutingData(IOBufQueue& bufQueue, RoutingData& routingData) override {
    auto buf = bufQueue.move();
    buf->coalesce();
    routingData.routingData = buf->data()[0];
    routingData.bufQueue.append(buf);
    return true;
  }
};

ServerBootstrap<DefaultPipeline>()
    .pipeline(AcceptRoutingPipelineFactory(…));
Parallel Computing
vector<Future<int>> futures;
for (int channel = 0; channel < 10; ++channel) {
  futures.emplace_back(makeFuture().then([channel] {
    int sum = 0;
    for (int i = 100 * channel; i < 100 * channel + 100; ++i) {
      sum += i;
    }
    return sum;
  }));
}
collectAll(futures.begin(), futures.end())
    .then([](vector<Try<int>>&& parts) {
      for (auto& part : parts) {
        cout << part.value() << endl;
      }
    });
Parallel Computing
vector<Future<map<int, Entity>>> segments;
for (int segment = 0; segment < 10; ++segment) {
  segments.emplace_back(makeFuture().then([] {
    map<int, Entity> result;
    // fill segment result
    return result;
  }));
}
Future<map<int, Entity>> entities = reduce(
    segments, map<int, Entity>(),
    [](map<int, Entity> last, map<int, Entity> current) {
      // merge current into last
      return last;
    });
Architecture
EventBase
• based on libevent's event_base
• NotificationQueue:
  • a producer-consumer queue for passing messages between EventBase threads
  • uses an eventfd (see the sketch below)
• loop():
  • run the "before loop" callbacks
  • event_base_loop
  • run loop callbacks (busy time)
  • time measurement
  • run NotificationQueue events
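A minimal sketch of the mechanism: runInEventBaseThread() hands the function through the NotificationQueue, whose eventfd wakes the loop:

#include <folly/io/async/EventBase.h>
#include <thread>

int main() {
  folly::EventBase evb; // wraps a libevent event_base

  std::thread other([&] {
    // Queued through the NotificationQueue; the eventfd wakes the loop.
    evb.runInEventBaseThread([&] { evb.terminateLoopSoon(); });
  });

  evb.loopForever(); // runs callbacks and the libevent loop until terminated
  other.join();
}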
SmoothLoopTime
• dynamically adjusting loop time
• EMA Algorithm (Exponential Moving Average)
• https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
$idle \leftarrow idle + \frac{busy_{last} + busy}{2}$

$EMA_i = e^{-idle/time\_interval} \cdot EMA_{i-1} + \left(1 - e^{-idle/time\_interval}\right) \cdot busy$
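In code, the update is a one-liner (a sketch mirroring the formula; the real logic lives in folly's EventBase::SmoothLoopTime):

#include <cmath>

// EMA update for the loop's busy time: `idle` and `busy` are the idle and
// busy durations of the last iteration, `time_interval` the smoothing window.
double smoothLoopTime(double ema, double idle, double busy, double time_interval) {
  double coeff = std::exp(-idle / time_interval);
  return coeff * ema + (1.0 - coeff) * busy; // EMA_i
}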
Acceptor
• an Acceptor runs on the Acceptor IO Executor
• multi-IP (SO_REUSEADDR), multi-threaded accept (SO_REUSEPORT)
• bind, listen, accept4 (up to a maximum number of accepts at once)
  • connection event callback
• adjusts the connection accept rate
• puts the accept info into a NotificationQueue
  • connection info is built via the Connection IO Executor
• activates the accept pipeline (Acceptor) read
• fires the child pipeline
Group
[Sequence diagram] ServerBootstrap::group(): the bootstrap creates a ServerWorkerPool and a ServerAcceptorFactory over the IOExecutor (io_group) and registers the pool as a thread observer; for each started thread (forEachThread / threadStarted), ServerAcceptorFactory::newAcceptor creates a ServerAcceptor along with its ConnectionManager.
Bind
[Sequence diagram] ServerBootstrap::bind(), socket setup: on the IOExecutor (accept_group), AsyncServerSocketFactory::newSocket creates an AsyncServerSocket, which binds and listens; the bootstrap waits and registers the remaining setup via addStartupFunction.
Bind
[Sequence diagram] ServerBootstrap::bind(), per worker: ServerWorkerPool::forEachWorker has the AsyncServerSocketFactory run addAcceptCallback in the acceptor_group EventBase (runAddAcceptCBInEventBase); the AsyncServerSocket creates a RemoteAcceptor, which starts consuming on the io_group EventBase (runStartConsumingInEventBase) and initializes the eventfd of its NotificationQueue; the socket's ServerEventHandler is then registered (registerAcceptHandler / registerEventHandler / addEventCallback).
Accept
[Sequence diagram] Accept: the accept_group EventBase fires ServerEventHandler::handlerReady → AsyncServerSocket::handlerReady → accept4 → connectionEventCallback, with accept-rate adjustment; the new connection is pushed to the RemoteAcceptor's queue (tryPutMessage), which signals the io_group EventBase.

Connection Procedure
[Sequence diagram] Connection: on the io_group EventBase, RemoteAcceptor::messageAvailable → ServerAcceptor::connectionAccepted → AcceptPipeline read → a ServerConnection is created and initialized → ConnectionPipeline transportActive.
Pipeline
• HandlerContext:
  • transmits events between handlers
• In/OutboundLink:
  • links handlers together in order
  • calls the handler to handle events
• PipelineContext:
  • derived from both HandlerContext and In/OutboundLink
  • directions: In, Out, Both
• Pipeline<In, Out>:
  • a HandlerContext wraps each Handler
• the chain (a custom-handler sketch follows below):
  • event (like read) → PipelineContext (as In/OutboundLink)::read → Handler::read → PipelineContext (as HandlerContext)::fireRead → NextHandler::read → …
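For example, a minimal custom inbound handler over these types; LoggingHandler is hypothetical, while InboundHandler comes from wangle/channel/Handler.h:

#include <wangle/channel/Handler.h>
#include <iostream>
#include <string>

// Hypothetical handler: log each inbound message, then forward it
// unchanged to the next handler via the context.
class LoggingHandler : public wangle::InboundHandler<std::string> {
 public:
  void read(Context* ctx, std::string msg) override {
    std::cout << "got: " << msg << std::endl;
    ctx->fireRead(std::move(msg)); // the HandlerContext passes the event on
  }
};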
Pipeline
[Class diagram]
• HandlerBase<Context> (interface): attachPipeline, detachPipeline
• HandlerContext<In, Out> (interface): fireRead, fireReadEOF, fireReadException, fireTransportActive, fireTransportInactive, fireWrite, fireWriteException, fireClose
• InboundHandlerContext<In> (interface): fireRead, fireReadEOF, fireReadException, fireTransportActive, fireTransportInactive
• OutboundHandlerContext<Out> (interface): fireWrite, fireWriteException, fireClose
• Handler<RIn, ROut, WIn, WOut> (interface): read, readEOF, readException, transportActive, transportInactive, write, writeException, close
• InboundHandler<RIn, ROut, WIn, WOut> (interface): read, readEOF, readException, transportActive, transportInactive
• OutboundHandler<RIn, ROut, WIn, WOut> (interface): write, writeException, close
• HandlerAdapter<RIn, ROut, WIn, WOut> (class)
• PipelineBase (class)
• Pipeline<In, Out> (class): read, readEOF, readException, transportActive, transportInactive, write, writeException, close
• PipelineContext (interface): attachPipeline, detachPipeline, setNextIn, setNextOut
• InboundLink<In> (interface): read, readEOF, readException, transportActive, transportInactive
• OutboundLink<Out> (interface): write, writeException, close
• ContextImplBase<Handler, Context> (class)
• ContextImpl<Handler> (class)
• InboundContextImpl<Handler> (class)
• OutboundContextImpl<Handler> (class)
Pipeline
[Sequence diagram] Event flow: an event (read) enters the pipeline → PipelineContext::read → Handler::read → HandlerContext::fireRead → NextHandler::read → fireRead → … down the chain.
Futures
• f.via(e).then(a).then(b)
• via:
  • sets the executor
• then:
  • makes a new future/promise pair
  • sets a callback:
    • the function's result will be set in the new promise
  • returns the new future (concrete sketch below)
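Spelled out with a concrete promise (a sketch in the API of this era; executor is any folly::Executor):

#include <folly/futures/Future.h>
#include <string>

void example(folly::Executor* executor) {
  folly::Promise<int> p;
  auto f = p.getFuture()
      .via(executor)                                  // set the executor
      .then([](int x) { return x + 1; })              // a: runs on executor
      .then([](int x) { return std::to_string(x); }); // b: chained future
  p.setValue(41); // fulfils the promise; the callbacks fire via the executor
}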
Core
• the shared state object behind Future and Promise
• reference counted by Future, Promise, and Executor
• holds the callback and the result
• activated by the Future destructor
• a finite state machine
• Try:
  • holds either a value or an exception (sketch below)
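Try in miniature; folly::makeTryWith (folly/Try.h) captures a function's result, or the exception it throws:

#include <folly/Try.h>
#include <cassert>
#include <stdexcept>

void example() {
  folly::Try<int> ok = folly::makeTryWith([] { return 42; });
  assert(ok.hasValue() && ok.value() == 42);

  folly::Try<int> bad =
      folly::makeTryWith([]() -> int { throw std::runtime_error("boom"); });
  assert(bad.hasException()); // the exception is captured, not rethrown
}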
Chained Future
Future<R> then(Func func) {
  Promise<R> p;
  Future<R> f = p.getFuture();
  // Simplified: the real code moves the promise into the callback.
  setCallback([func, p](Try t) {
    if (t.hasException()) {
      p.setException(t.exception());
    } else {
      p.setWith([&]() { return func(t.get()); });
    }
  });
  return f;
}
Chained Future
Future<vector<string>> f = makeFuture<int>(3)
    .then([](int i) {
      return to_string(i);
    })
    .then([](string s) {
      return vector<string>(10, s); // ten copies of s
    });
Chained Future
Future<R> onError(Func func) {
  Promise<R> p;
  Future<R> f = p.getFuture();
  setCallback([func, p](Try t) {
    // withException returns true iff the Try held an exception of type Exp
    if (!t.withException([&](Exp e) {
          p.setWith([&] { return func(e); });
        })) {
      p.setTry(t);
    }
  });
  return f;
}
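Usage sketch, continuing in the same style: recover from a thrown exception with a fallback value (onError selects on the lambda's exception type):

makeFuture<int>(3)
    .then([](int) -> int {
      throw std::runtime_error("boom"); // the Try now carries an exception
    })
    .onError([](const std::runtime_error&) {
      return -1; // type-matched; the chain recovers with a fallback value
    });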
Summary
Proxygen
• Facebook's C++ HTTP libraries
• comprises the core C++ HTTP abstractions used at Facebook
Netty
Reference
• https://github.com/facebook/wangle
• https://github.com/facebook/folly
• https://github.com/facebook/proxygen
• http://twitter.github.io/finagle
• http://netty.io
Thank you