delete frequent log and modify setting
Signed-off-by: wzh <[email protected]>
zhihuiwan committed Jan 17, 2020
1 parent 998a18c commit 158e58b
Showing 3 changed files with 53 additions and 43 deletions.
4 changes: 2 additions & 2 deletions fate_flow/driver/job_detector.py
@@ -24,7 +24,7 @@ def run_do(self):
         try:
             running_tasks = job_utils.query_task(status='running', run_ip=get_lan_ip())
             stop_job_ids = set()
-            detect_logger.info('start to detect running job..')
+            # detect_logger.info('start to detect running job..')
             for task in running_tasks:
                 try:
                     process_exist = job_utils.check_job_process(int(task.f_run_pid))
@@ -61,7 +61,7 @@ def run_do(self):
                         src_role=None,
                         json_body={'job_id': job_id},
                         work_mode=job_work_mode)
-                    schedule_logger(job_id).info('send stop job {} command'.format(job_id))
+                    # schedule_logger(job_id).info('send stop job {} command'.format(job_id))
                 except Exception as e:
                     detect_logger.exception(e)
             finally:
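
Both hunks silence a message that fires on every detector pass. Where such a message is still occasionally useful, an alternative to commenting it out is demoting it to DEBUG, which keeps it reachable without the per-cycle noise. A minimal sketch of that pattern, assuming detect_logger is a standard logging.Logger (which FATE's log_utils appears to wrap); the function name is illustrative, not part of the commit:

import logging

detect_logger = logging.getLogger("fate_flow_detect")

def detect_running_jobs(running_tasks):
    # Silent under the default INFO level, but available again by
    # switching this logger to DEBUG when troubleshooting the detector.
    detect_logger.debug('start to detect running job..')
    for task in running_tasks:
        pass  # per-task liveness checks go here
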
4 changes: 2 additions & 2 deletions fate_flow/manager/tracking.py
@@ -156,7 +156,7 @@ def get_metric_list(self, job_level: bool = False):
                         'and f_task_id = "{}"'.format(
                 self.get_table_index(), self.job_id, self.component_name if not job_level else 'dag', self.role,
                 self.party_id, self.task_id)
-            stat_logger.info(query_sql)
+            # stat_logger.info(query_sql)
             cursor = DB.execute_sql(query_sql)
             for row in cursor.fetchall():
                 metrics[row[0]] = metrics.get(row[0], [])
@@ -291,7 +291,7 @@ def read_data_from_db(self, metric_namespace: str, metric_name: str, data_type,
                 self.get_table_index(), self.job_id, self.component_name if not job_level else 'dag', self.role,
                 self.party_id, self.task_id, metric_namespace, metric_name, data_type)
             cursor = DB.execute_sql(query_sql)
-            stat_logger.info(query_sql)
+            # stat_logger.info(query_sql)
             for row in cursor.fetchall():
                 yield deserialize_b64(row[0]), deserialize_b64(row[1])
         except Exception as e:
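
The read path yields each stored metric pair through deserialize_b64. FATE's helper of this era appears to be base64-wrapped pickle; a self-contained sketch under that assumption, for reference only:

import base64
import pickle

def serialize_b64(obj) -> str:
    # Pickle, then base64-encode, so arbitrary Python objects fit in a text column.
    return base64.b64encode(pickle.dumps(obj)).decode('utf-8')

def deserialize_b64(s):
    # Inverse of serialize_b64, as used when reading metric rows back out.
    return pickle.loads(base64.b64decode(s))

assert deserialize_b64(serialize_b64({'auc': 0.75})) == {'auc': 0.75}
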
88 changes: 49 additions & 39 deletions fate_flow/settings.py
@@ -24,49 +24,20 @@

from fate_flow.utils.setting_utils import CenterConfig

log_utils.LoggerFactory.set_directory(os.path.join(file_utils.get_project_base_directory(), 'logs', 'fate_flow'))
stat_logger = log_utils.getLogger("fate_flow_stat")
detect_logger = log_utils.getLogger("fate_flow_detect")
access_logger = log_utils.getLogger("fate_flow_access")

'''
Constants
'''

API_VERSION = "v1"
ROLE = 'fateflow'
SERVERS = 'servers'
SERVINGS_ZK_PATH = '/FATE-SERVICES/serving/online/publishLoad/providers'
FATE_FLOW_ZK_PATH = '/FATE-SERVICES/flow/online/transfer/providers'
FATE_MANAGER_GET_NODE_INFO = '/node/info'
FATE_MANAGER_NODE_CHECK = '/node/management/check'
MAIN_MODULE = os.path.relpath(__main__.__file__)
SERVER_MODULE = 'fate_flow_server.py'
TASK_EXECUTOR_MODULE = 'driver/task_executor.py'
WORK_MODE = 0
USE_LOCAL_DATABASE = True
# Local authentication switch
USE_AUTHENTICATION = False
PRIVILEGE_COMMAND_WHITELIST = []
# Node check switch
CHECK_NODES_IDENTITY = False
MAX_CONCURRENT_JOB_RUN = 5
MAX_CONCURRENT_JOB_RUN_HOST = 10
DEFAULT_WORKFLOW_DATA_TYPE = ['train_input', 'data_input', 'id_library_input', 'model', 'predict_input',
'predict_output', 'evaluation_output', 'intersect_data_output']
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
DEFAULT_GRPC_OVERALL_TIMEOUT = 60 * 1000 # ms
JOB_DEFAULT_TIMEOUT = 7 * 24 * 60 * 60
HEADERS = {
'Content-Type': 'application/json',
}

IP = '0.0.0.0'
GRPC_PORT = 9360
HTTP_PORT = 9380
ZOOKEEPER_HOSTS = ['127.0.0.1:2181']
# standalone jobs will be sent to the standalone job server when FATE-Flow works in cluster deploy mode,
# but this is not the port for FATE-Flow in standalone deploy mode.
CLUSTER_STANDALONE_JOB_SERVER_PORT = 9381
WORK_MODE = 0
USE_LOCAL_DATABASE = True
USE_AUTHENTICATION = False
USE_CONFIGURATION_CENTER = False
CHECK_NODES_IDENTITY = False
PRIVILEGE_COMMAND_WHITELIST = []
REDIS_QUEUE_DB_INDEX = 0

DATABASE = {
'name': 'fate_flow',
@@ -85,11 +56,50 @@
'max_connections': 500
}

REDIS_QUEUE_DB_INDEX = 0
'''
Constants
'''
API_VERSION = "v1"
ROLE = 'fateflow'
SERVERS = 'servers'
MAIN_MODULE = os.path.relpath(__main__.__file__)
SERVER_MODULE = 'fate_flow_server.py'
TASK_EXECUTOR_MODULE = 'driver/task_executor.py'
DEFAULT_WORKFLOW_DATA_TYPE = ['train_input', 'data_input', 'id_library_input', 'model', 'predict_input',
'predict_output', 'evaluation_output', 'intersect_data_output']
HEADERS = {
'Content-Type': 'application/json',
}
# fate-serving
SERVINGS_ZK_PATH = '/FATE-SERVICES/serving/online/publishLoad/providers'
FATE_FLOW_ZK_PATH = '/FATE-SERVICES/flow/online/transfer/providers'
# fate-manager
FATE_MANAGER_GET_NODE_INFO = '/node/info'
FATE_MANAGER_NODE_CHECK = '/node/management/check'

# logger
log_utils.LoggerFactory.set_directory(os.path.join(file_utils.get_project_base_directory(), 'logs', 'fate_flow'))
stat_logger = log_utils.getLogger("fate_flow_stat")
detect_logger = log_utils.getLogger("fate_flow_detect")
access_logger = log_utils.getLogger("fate_flow_access")


"""
Services
"""
IP = '0.0.0.0'
GRPC_PORT = 9360
HTTP_PORT = 9380

# standalone jobs will be sent to the standalone job server when FATE-Flow works in cluster deploy mode,
# but this is not the port for FATE-Flow in standalone deploy mode.
CLUSTER_STANDALONE_JOB_SERVER_PORT = 9381

# zookeeper
USE_CONFIGURATION_CENTER = False
ZOOKEEPER_HOSTS = ['127.0.0.1:2181']

# services ip and port
server_conf = file_utils.load_json_conf("arch/conf/server_conf.json")
PROXY_HOST = server_conf.get(SERVERS).get('proxy').get('host')
PROXY_PORT = server_conf.get(SERVERS).get('proxy').get('port')
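
The chained get() calls at the end of the new settings.py assume a servers.proxy entry in arch/conf/server_conf.json. A hypothetical shape for that file, with placeholder host and port values, read the same way the settings module reads it:

import json

# Hypothetical server_conf.json contents; the keys mirror the lookups in
# settings.py, the values are placeholders, not actual deployment values.
SERVER_CONF = json.loads('''
{
    "servers": {
        "proxy": {"host": "127.0.0.1", "port": 9370}
    }
}
''')

SERVERS = 'servers'
PROXY_HOST = SERVER_CONF.get(SERVERS).get('proxy').get('host')  # "127.0.0.1"
PROXY_PORT = SERVER_CONF.get(SERVERS).get('proxy').get('port')  # 9370
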
