Add thrift_base64_transport, which writes base64-encoded data
Summary:
This makes it easy to run Hadoop MapReduce jobs via Hadoop Streaming over Thrift-serialized structs
without implementing a custom file splitter or other special input handling.
Test plan: test_disklog:t_base64()
git-svn-id: https://svn.apache.org/repos/asf/incubator/thrift/trunk@666466 13f79535-47bb-0310-9956-ffa450edef68
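
For context, the new transport is meant to be layered between the binary protocol and whatever transport actually persists the bytes (here, the disk_log transport). Below is a minimal sketch of that stack, condensed from the test added in this patch; the function name, disk_log name, and file path are illustrative, and only factory calls that appear in the test are used:

    example_stack() ->
        %% Bottom layer: disk_log transport does the actual file I/O
        {ok, DiskLogFactory} =
            thrift_disk_log_transport:new_transport_factory(
              example_log,                          % disk_log name (illustrative)
              [{file, "/tmp/example_b64_log"},      % log file path (illustrative)
               {size, {1024*1024, 10}}]),           % wrap at 1 MB, keep 10 files
        %% Middle layer: base64 transport encodes everything written through it
        {ok, B64Factory} =
            thrift_base64_transport:new_transport_factory(DiskLogFactory),
        %% Buffered transport batches writes before they reach the encoder
        {ok, BufFactory} =
            thrift_buffered_transport:new_transport_factory(B64Factory),
        %% Binary protocol on top, then a client bound to the generated service
        {ok, ProtocolFactory} =
            thrift_binary_protocol:new_protocol_factory(BufFactory, []),
        {ok, Client} = thrift_client:start_link(ProtocolFactory, thriftTest_thrift),
        Client.
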
diff --git a/test/erl/src/test_disklog.erl b/test/erl/src/test_disklog.erl
index 78b792c..81b7b50 100644
--- a/test/erl/src/test_disklog.erl
+++ b/test/erl/src/test_disklog.erl
@@ -28,3 +28,35 @@
ok.
+
+
+t_base64() ->
+ {ok, TransportFactory} =
+ thrift_disk_log_transport:new_transport_factory(
+ test_disklog,
+ [{file, "/tmp/test_b64_log"},
+ {size, {1024*1024, 10}}]),
+ {ok, B64Factory} =
+ thrift_base64_transport:new_transport_factory(TransportFactory),
+ {ok, BufFactory} =
+ thrift_buffered_transport:new_transport_factory(B64Factory),
+ {ok, ProtocolFactory} = thrift_binary_protocol:new_protocol_factory(
+ BufFactory, []),
+ {ok, Client} = thrift_client:start_link(ProtocolFactory, thriftTest_thrift),
+
+ io:format("Client started~n"),
+
+ % We can only make async (oneway) calls with this client; a synchronous call
+ % would try to read a response back from the disk log and crash.
+ {ok, ok} = thrift_client:call(Client, testAsync, [16#deadbeef]),
+ io:format("Call written~n"),
+
+ % Use the send_call function to write a non-async call into the log
+ ok = thrift_client:send_call(Client, testString, [<<"hello world">>]),
+ io:format("Non-async call sent~n"),
+
+ ok = thrift_client:close(Client),
+ io:format("Client closed~n"),
+
+ ok.
+