From 6f30f8a46f39768503b0f298dc8e318e9e086207 Mon Sep 17 00:00:00 2001 From: Tongxuan Liu Date: Mon, 13 Jun 2022 20:41:19 +0800 Subject: [PATCH] [Docs] Update README and KafkaDataset Document. (#265) --- README.md | 11 ++++++----- docs/KafkaDataset.md | 4 ++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index eefff15fe43..82e0a3398dd 100644 --- a/README.md +++ b/README.md @@ -23,15 +23,16 @@ DeepRec has super large-scale distributed training capability, supporting model - Multi-tier Hybrid Embedding Storage #### **Performance Optimization** - Distributed Training Framework Optimization, such as grpc+seastar, FuseRecv, StarServer, HybridBackend etc. - - Runtime Optimization, such as CPU memory allocator (PRMalloc), GPU memory allocator etc. + - Runtime Optimization, such as CPU memory allocator (PRMalloc), GPU memory allocator, cost-based and critical-path-first Executor etc. - Operator level optimization, such as BF16 mixed precision optimization, sparse operator optimization and EmbeddingVariable on PMEM and GPU, new hardware feature enabling, etc. - Graph level optimization, such as AutoGraphFusion, SmartStage, AutoPipeline, StrutureFeature, MicroBatch etc. - Compilation optimization, support BladeDISC, XLA etc. #### **Deploy and Serving** - - Incremental model loading and exporting - - Super-scale sparse model distributed serving - - Multi-tier hybrid storage and multi backend supported - - Online deep learning with low latency + - Incremental model loading and exporting. + - Super-scale sparse model distributed serving. + - Multi-tier hybrid storage and multiple backends supported. + - Online deep learning with low latency. + - High-performance processor with SessionGroup supported. 
*** diff --git a/docs/KafkaDataset.md b/docs/KafkaDataset.md index 5fa9d880d61..28040d7a209 100644 --- a/docs/KafkaDataset.md +++ b/docs/KafkaDataset.md @@ -41,7 +41,7 @@ class KafkaDataset(dataset_ops.Dataset): import tensorflow as tf from tensorflow.python.data.ops import iterator_ops -kafka_dataset = tf.data.KafkaDataset(topics=["dewu_1_partition:0:0:-1"], +kafka_dataset = tf.data.KafkaDataset(topics=["test_1_partition:0:0:-1"], group="test_group1", timeout=100, eof=False) @@ -146,4 +146,4 @@ with tf.Session() as sess: for _ in range(100): x = sess.run(next_elements, feed_dict={handle: train_handle}) print(x) -``` \ No newline at end of file +```