我需要在不同的端口上部署ml模型。 它是请求预测值的代码的一部分。你知道吗
首款车型:
def main():
p = argparse.ArgumentParser(description="Searcher - For sudrf.ru")
p.add_argument('-c', '--config', metavar='<config.yaml>', dest='config_file', required=True,
help='Config file.')
p.add_argument('--loglevel', metavar='<LOG_LEVEL>', dest='log_level', choices=['INFO', 'info', 'DEBUG', 'debug'], default='info',
help='Logger level: info|debug')
args = p.parse_args()
print('Config file: "%s"' % args.config_file)
config = Config(args.config_file)
if not config.is_loaded():
print(config.get_errmsg())
return 1
# create pid file
pidlock = PidLock('courts_search_httpservice.pid')
pid = pidlock.getpid()
if pid:
print('Service is already running with pid %s' % pid)
return 1
pidlock.lock()
print('PID file: %s' % pidlock.get_pidfile())
logging.basicConfig(level=args.log_level.upper(), format='%(asctime)s:courts_search:%(levelname)s\t%(message)s',
filemode='w', filename=config.fields.get('log_file', 'court_search.log'))
# bind on host-port
host = config.fields.get('host', '0.0.0.0')
port = config.fields.get('port', 8080)
logging.info('Binding on address %s:%s' % (host, port))
http_handler_type = HttpHandler
http_handler_type.error_content_type = CONTENT_TYPE
server = ThreadedHTTPServer((host, port), http_handler_type)
# init indexer and query parser
# TODO: fixup path if it's not absolute
logging.info('Loading model' + config.fields['graph_file'])
server.model = Model(config.fields['checkpoint_path'], config.fields['graph_file'], config.fields['data_file'], config.fields['threshold_l'], config.fields['threshold_h'])
logging.info('Done.')
# start serving
print('Starting server, use <Ctrl-C> to stop')
logging.info('Processing incoming requests...')
try:
server.serve_forever()
except KeyboardInterrupt:
logging.info('[STOP] KeyboardInterrupt received')
server.socket.close()
return 0
我试图将port
更改为8000
,但它会返回
Service is already running with pid 19931
我应该在那里更改什么以并行运行两个模型? 我不能把唯一的端口改成并行的?你知道吗
目前没有回答
相关问题 更多 >
编程相关推荐