#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/10/18 10:04
@File : AiBot.py
@Desc : FastAPI entry point for the text dialogue bot service ("/botservice").
"""
import sys

sys.path.append("..")

from config import (
    get_logger,
    INTENT_ERROR,
    END,
    HANGUP,
    TRANSFER,
)
from fastapi import FastAPI
import uvicorn
from interface import reqRobot, ChatResponse
from database import *
from scene import Dialog, Msg
from entity import Error
import traceback
from util import insert_log, timetic
from prometheus_fastapi_instrumentator import Instrumentator
from prometheus_client import Histogram
import time

logger = get_logger("log")
app = FastAPI()
# Service monitoring: QPS, latency, CPU utilization.
Instrumentator().instrument(app).expose(app)
# from util.registry import AI_BOT_REQUEST_LATENCY
AI_BOT_REQUEST_LATENCY = Histogram('ai_bot_request_latency', 'Text bot endpoint latency', ['bid'])
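
# After expose(app), the instrumentator serves Prometheus metrics at GET /metrics
# (its default path). The custom histogram above appears there roughly as below;
# the label value and the numbers are illustrative only:
#   ai_bot_request_latency_bucket{bid="bot-001",le="0.1"} 12.0
#   ai_bot_request_latency_sum{bid="bot-001"} 3.4
#   ai_bot_request_latency_count{bid="bot-001"} 15.0
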

@app.post("/botservice")
@timetic
def botservice(reqbot: reqRobot):
    start_time = time.time()
    bid, uid, code, asr = reqbot.taskId, reqbot.userId, reqbot.nodeId, reqbot.asrText
    session_id, record_id = reqbot.sessionId, reqbot.recordId
    logger.info("AI customer service: botId: {}, session: {}, uid: {}, nodeId: {}, ASR: {}".format(bid, session_id, uid, code, asr))
    scene = get_status_cache(uid, bid, "status", session_id)
    # scene.case.code = "1.00"
    if scene is None:
        scene = Dialog()
    try:
        cur_code = scene.get_current_code(bid)
        logger.info(f"AI customer service: uid:{uid}, sessionId:{session_id}, code:{code}, cur_code:{cur_code}")
        if code == cur_code:
            # Map the recognized utterance onto one of the current node's options.
            option = scene.mapping_semantics(code, asr, bid, uid, session_id)
            logger.info(f"AI customer service: uid:{uid}, sessionId:{session_id}, code:{code}, opt:{option}")
            if not option:
                return ChatResponse(code=Error.error_intent.value, message=INTENT_ERROR).to_json_string()
            msg = scene.dialogue(Msg(code=code,
                                     option=option,
                                     asr=asr,
                                     session=session_id,
                                     record=record_id,
                                     ), bid, uid, session_id)
            if msg.code == END:
                # Dialogue finished: persist the result and drop the live status cache.
                with_status_cache(uid, bid, 'result', session_id, scene)
                del_cache(uid, bid, 'status', session_id)
                insert_log(bid, uid, session_id, scene)
                return ChatResponse(code=200, message="success").to_json_string()
            else:
                with_status_cache(uid, bid, 'status', session_id, scene)
                insert_log(bid, uid, session_id, scene)
                if msg.action in [HANGUP, TRANSFER]:
                    del_cache(uid, bid, 'status', session_id)
                    with_status_cache(uid, bid, 'result', session_id, scene)
                return ChatResponse().from_msg(msg).to_json_string()
        else:
            # Requested node does not match the session state: replay the current node.
            msg = scene.get_current_content(bid, uid)
            if code == "start" or code is None:
                scene.case.code = cur_code
            with_status_cache(uid, bid, 'status', session_id, scene)
            insert_log(bid, uid, session_id, scene)
            return ChatResponse().from_msg(msg).to_json_string()
    except Exception:
        # Log the full traceback, not just the exception message.
        logger.error(traceback.format_exc())
        # del_cache(uid, bid, 'status', session_id)
        return ChatResponse(code=Error.error.value, message="internal error").to_json_string()
    finally:
        latency = time.time() - start_time
        print(f"Latency for {bid}: {latency}")  # log the per-request latency
        AI_BOT_REQUEST_LATENCY.labels(bid=bid).observe(latency)


if __name__ == '__main__':
    uvicorn.run("AiBot:app", host='0.0.0.0', port=40072, workers=4)
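
# Example client call (a sketch, kept commented out so the module stays importable).
# The field names follow the reqRobot attributes read above; the values are made up,
# and reqRobot may declare further fields not shown here:
#   import requests
#   payload = {
#       "taskId": "bot-001",      # bid
#       "userId": "user-123",     # uid
#       "nodeId": "start",        # code of the dialogue node
#       "asrText": "hello",       # recognized user utterance
#       "sessionId": "sess-abc",
#       "recordId": "rec-001",
#   }
#   resp = requests.post("http://127.0.0.1:40072/botservice", json=payload)
#   print(resp.json())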