ModelFetcher.ts

import { DeferredPromise } from "@common/DeferredPromise";
import { forEachIn } from "@common/utils/forEachIn";
import { useWebsocketStore } from "@/stores/websocket";
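
/**
 * A single queued fetch request: the deferred promise to resolve once the
 * models arrive, along with the model name and ids that were asked for.
 */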
export interface ModelFetcherRequest {
    promise: DeferredPromise;
    payload: {
        modelName: string;
        modelIds: string[];
    };
}
// TODO combine the ModelFetcher and the ModelPermissionFetcher
/**
 * Fetches models in bulk, batching requests per model type at most once every 25ms.
 * So if we tried to fetch 100 different minifiedUser models separately within that window,
 * only 1 request would be made to fetch the models, not 100 separate ones.
 */
export class ModelFetcher {
    // Requests waiting to be sent in the next batch
    private static requestsQueued: ModelFetcherRequest[] = [];

    // Whether a batch timeout is currently scheduled
    private static timeoutActive = false;

    // Fetched models, keyed by model name and then model id; pruned after each batch
    private static responseCache: Record<string, Record<string, unknown>> = {};
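
    /**
     * Schedules at most one batch per 25ms window. When the timeout fires, all queued
     * requests are grouped by model name, any ids not already cached are fetched in a
     * single findManyById job per model, and every queued promise is resolved from the cache.
     */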
    private static fetch() {
        // If no timeout is currently running, indicate that we will run one. Otherwise return, as a timeout is already running
        if (!this.timeoutActive) this.timeoutActive = true;
        else return;

        setTimeout(() => {
            // Reset timeoutActive, so another timeout can be scheduled
            this.timeoutActive = false;

            // Take all currently queued requests out of the queue, so we can request them
            const requests = this.requestsQueued;
            this.requestsQueued = [];

            // Split the requests per model
            const requestsPerModel: Record<string, ModelFetcherRequest[]> = {};
            requests.forEach(request => {
                const { modelName } = request.payload;
                if (!Array.isArray(requestsPerModel[modelName]))
                    requestsPerModel[modelName] = [];
                requestsPerModel[modelName].push(request);
            });

            const modelNames = Object.keys(requestsPerModel);

            const { runJob } = useWebsocketStore();

            // TODO somehow make the following forEachIn run at the same time for all modelNames
            // Run the requests per model
            forEachIn(modelNames, async modelName => {
                // All model ids that are already cached
                let cachedModelIds = Object.keys(this.responseCache[modelName]);

                // A unique list of all model ids for the current model that we want to request, and that are not already cached
                const modelIds = Array.from(
                    new Set(
                        requestsPerModel[modelName].flatMap(
                            request => request.payload.modelIds
                        )
                    )
                ).filter(
                    (modelId: string) => !cachedModelIds.includes(modelId)
                );

                // Only do a request if at least one model isn't already cached
                if (modelIds.length > 0) {
                    console.log(`Requesting model ids`, modelName, modelIds);
                    const result = (await runJob(
                        `data.${modelName}.findManyById`,
                        {
                            _ids: modelIds
                        }
                    )) as any[];

                    // Cache the response for each requested model id
                    modelIds.forEach(modelId => {
                        const model = result.find(
                            model => model._id === modelId
                        );
                        console.log(`Caching ${modelName}.${modelId}`, model);
                        this.responseCache[modelName][modelId] = model;
                    });
                }
                const requests = requestsPerModel[modelName];

                // For all requests, resolve the deferred promise with the returned model(s) that were requested
                requests.forEach(request => {
                    const { payload, promise } = request;
                    const { modelIds } = payload;
                    const models = modelIds
                        .map(modelId => this.responseCache[modelName][modelId])
                        .filter(model => model);
                    promise.resolve(models);
                });

                // A unique list of model ids that will be requested in the next batch for the current model type
                const queuedModelIds = Array.from(
                    new Set(
                        this.requestsQueued
                            .filter(
                                request =>
                                    request.payload.modelName === modelName
                            )
                            .flatMap(request => request.payload.modelIds)
                    )
                );

                // A list of model ids whose responses are currently cached
                cachedModelIds = Object.keys(this.responseCache[modelName]);

                // A list of cached model responses that can safely be deleted, because no queued-up request needs them
                const cachedModelIdsToDelete = cachedModelIds.filter(
                    cachedModelId => !queuedModelIds.includes(cachedModelId)
                );

                console.log(`Queued model ids`, modelName, queuedModelIds);
                console.log(`Cached model ids`, modelName, cachedModelIds);
                console.log(
                    `Cached model ids to delete`,
                    modelName,
                    cachedModelIdsToDelete
                );

                // TODO In theory, we could check if any of the queued requests can be resolved here. Not worth it at the moment.
                cachedModelIdsToDelete.forEach(cachedModelIdToDelete => {
                    delete this.responseCache[modelName][cachedModelIdToDelete];
                });
            });
        }, 25);
    }
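
    /**
     * Queues a fetch for the given model ids and resolves with the cached/fetched models
     * once the next batch (at most 25ms later) has completed.
     */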
    public static fetchModelsByIds(modelName: string, modelIds: string[]) {
        this.responseCache[modelName] ??= {};

        return new Promise(resolve => {
            const promise = new DeferredPromise();

            // Listen for the deferred promise response, before we actually push and fetch
            promise.promise.then(result => {
                resolve(result);
            });

            // Push the request to the queue
            this.requestsQueued.push({
                payload: {
                    modelName,
                    modelIds
                },
                promise
            });

            // Call the fetch function, which will start a timeout (if one isn't already running) that actually requests the models
            this.fetch();
        });
    }
}
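
// Illustrative usage sketch (not part of the class; the model names and ids below are
// hypothetical). Calls made within the same 25ms window are batched into at most one
// data.<modelName>.findManyById job per model type:
//
//     const [users, playlists] = await Promise.all([
//         ModelFetcher.fetchModelsByIds("minifiedUsers", ["USER_ID_1", "USER_ID_2"]),
//         ModelFetcher.fetchModelsByIds("playlists", ["PLAYLIST_ID_1"])
//     ]);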