aboutsummaryrefslogtreecommitdiff
path: root/src/main/java/gregtech/api/util/GT_ParallelHelper.java
blob: 4ae33cd48d70d6f3d4f29eaa9e39fbdd485ac226 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
package gregtech.api.util;

import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.PriorityQueue;

import net.minecraft.inventory.IInventory;
import net.minecraft.item.ItemStack;
import net.minecraftforge.fluids.FluidStack;

import org.apache.commons.lang3.tuple.MutablePair;
import org.apache.commons.lang3.tuple.MutableTriple;

import com.gtnewhorizon.gtnhlib.util.map.ItemStackMap;

import gregtech.api.metatileentity.implementations.GT_MetaTileEntity_Hatch;
import gregtech.api.metatileentity.implementations.GT_MetaTileEntity_Hatch_Output;
import gregtech.api.metatileentity.implementations.GT_MetaTileEntity_MultiBlockBase;
import gregtech.api.multitileentity.multiblock.base.MultiBlockController;
import gregtech.api.objects.XSTR;
import gregtech.common.tileentities.machines.GT_MetaTileEntity_Hatch_OutputBus_ME;
import gregtech.common.tileentities.machines.GT_MetaTileEntity_Hatch_Output_ME;

public class GT_ParallelHelper {

    /** MetaTileEntity-based multiblock controller; set when void protection is enabled for a metatile multi. */
    private GT_MetaTileEntity_MultiBlockBase mMachineMeta;
    /**
     * MultiTileEntity-based multiblock controller. NOTE(review): stored but not yet consulted by the
     * void-protection calculations below, which only look at {@code mMachineMeta}.
     */
    private MultiBlockController<?> mMachineMulti;
    /** Recipe used when trying to calculate parallels. */
    private GT_Recipe mRecipe;
    /** EUt available to the multiblock (this should be the total EUt available). */
    private long mAvailableEUt;
    /**
     * {@code mCurrentParallel}: the current parallel possible for the multiblock.
     * {@code mMaxParallel}: the maximum possible parallel for the multiblock.
     * {@code mBatchModifier}: the batch modifier applied when batch mode is enabled. 1 does nothing.
     * 2 doubles max possible parallel, but also duration.
     */
    private int mCurrentParallel = 0, mMaxParallel = 1, mBatchModifier = 1;
    /**
     * {@code mItemInputs}: the item inputs of the multiblock for the current recipe check.
     * {@code mItemOutputs}: the item outputs of the recipe with the applied parallel.
     */
    private ItemStack[] mItemInputs, mItemOutputs;
    /**
     * {@code mFluidInputs}: the fluid inputs of the multiblock for the current recipe check.
     * {@code mFluidOutputs}: the fluid outputs of the recipe with the applied parallel.
     */
    private FluidStack[] mFluidInputs, mFluidOutputs;
    /**
     * {@code mVoidProtection}: does the multi have void protection enabled.
     * {@code mConsume}: should the Parallel Helper automatically consume for the multi.
     * {@code mBatchMode}: is batch mode turned on.
     * {@code mCalculateOutputs}: should the Parallel Helper automatically calculate the outputs of
     * the recipe with the current parallel.
     * {@code mBuilt}: has the Parallel Helper been built.
     */
    private boolean mVoidProtection, mConsume, mBatchMode, mCalculateOutputs, mBuilt;
    /**
     * {@code mDurationMultiplier}: the duration multiplier when batch mode is enabled.
     * {@code mEUtModifier}: modifier applied on the recipe EUt. Useful for GT++ machines.
     */
    private float mDurationMultiplier, mEUtModifier = 1;

    /** Fluent builder: configure via the setters below, then call {@link #build()}. */
    public GT_ParallelHelper() {}

    /**
     * Enables void protection on a metatile multiblock. Experimental! Still needs to be worked on.
     *
     * @param aMachineMeta the MetaTileEntity controller whose output busses/hatches are inspected
     * @return this helper, for chaining
     */
    public GT_ParallelHelper enableVoidProtection(GT_MetaTileEntity_MultiBlockBase aMachineMeta) {
        mVoidProtection = true;
        mMachineMeta = aMachineMeta;
        return this;
    }

    /**
     * Enables void protection on a multitile multiblock. Experimental! Still needs to be worked on.
     * NOTE(review): the stored controller is not yet used by the parallel calculations.
     *
     * @param aMachineMulti the MultiTileEntity controller
     * @return this helper, for chaining
     */
    public GT_ParallelHelper enableVoidProtection(MultiBlockController<?> aMachineMulti) {
        mVoidProtection = true;
        mMachineMulti = aMachineMulti;
        return this;
    }

    /**
     * Sets the recipe, which will be used for the parallel calculation.
     *
     * @param aRecipe recipe to compute parallels for
     * @return this helper, for chaining
     */
    public GT_ParallelHelper setRecipe(GT_Recipe aRecipe) {
        mRecipe = aRecipe;
        return this;
    }

    /**
     * Sets the items available for the recipe check.
     *
     * @param aItemInputs item stacks available to the machine
     * @return this helper, for chaining
     */
    public GT_ParallelHelper setItemInputs(ItemStack... aItemInputs) {
        mItemInputs = aItemInputs;
        return this;
    }

    /**
     * Sets the fluid inputs available for the recipe check.
     *
     * @param aFluidInputs fluid stacks available to the machine
     * @return this helper, for chaining
     */
    public GT_ParallelHelper setFluidInputs(FluidStack... aFluidInputs) {
        mFluidInputs = aFluidInputs;
        return this;
    }

    /**
     * Sets the available EUt when trying for more parallels.
     *
     * @param aAvailableEUt total EUt available to the multiblock
     * @return this helper, for chaining
     */
    public GT_ParallelHelper setAvailableEUt(long aAvailableEUt) {
        mAvailableEUt = aAvailableEUt;
        return this;
    }

    /**
     * Sets the modifier for recipe EUt. 1 does nothing, 0.9 is 10% less, 1.1 is 10% more.
     *
     * @param aEUtModifier multiplier applied to the recipe's EUt
     * @return this helper, for chaining
     */
    public GT_ParallelHelper setEUtModifier(float aEUtModifier) {
        mEUtModifier = aEUtModifier;
        return this;
    }

    /**
     * Consume inputs when trying for parallels. Without this, recipe checks run on copies of the
     * inputs and the originals are left untouched.
     *
     * @return this helper, for chaining
     */
    public GT_ParallelHelper enableConsumption() {
        mConsume = true;
        return this;
    }

    /**
     * Sets the max parallel a multi can handle.
     *
     * @param aMaxParallel maximum number of parallel crafts
     * @return this helper, for chaining
     */
    public GT_ParallelHelper setMaxParallel(int aMaxParallel) {
        mMaxParallel = aMaxParallel;
        return this;
    }

    /**
     * Enables batch mode. Can do up to an additional {@code mCurrentParallel * mBatchModifier}
     * processed recipes. A batch modifier of 1 does nothing.
     *
     * @param aBatchModifier multiplier on max parallels (and duration) while batching
     * @return this helper, for chaining
     */
    public GT_ParallelHelper enableBatchMode(int aBatchModifier) {
        mBatchMode = true;
        mBatchModifier = aBatchModifier;
        return this;
    }

    /**
     * Enables the outputs to be calculated with the achieved parallels, useful if one isn't doing
     * anything special with outputs.
     *
     * @return this helper, for chaining
     */
    public GT_ParallelHelper enableOutputCalculation() {
        mCalculateOutputs = true;
        return this;
    }

    /**
     * Finishes the GT_ParallelHelper. Anything changed after this will not affect anything.
     *
     * @return this helper, for chaining
     * @throws IllegalStateException if called more than once
     */
    public GT_ParallelHelper build() {
        if (mBuilt) {
            throw new IllegalStateException("Tried to build twice");
        }
        mBuilt = true;
        determineParallel();
        return this;
    }

    /**
     * @return the parallels achieved by the multiblock for the current recipe check
     * @throws IllegalStateException if called before {@link #build()}
     */
    public int getCurrentParallel() {
        if (mBuilt) {
            return mCurrentParallel;
        }
        throw new IllegalStateException("Tried to get parallels before building");
    }

    /**
     * @return the duration multiplier if batch mode was enabled for the multiblock, otherwise 1
     * @throws IllegalStateException if called before {@link #build()}
     */
    public float getDurationMultiplier() {
        if (!mBuilt) {
            throw new IllegalStateException("Tried to get duration multiplier before building");
        }
        // Only batch mode stretches the recipe duration; otherwise it is unchanged.
        return mBatchMode ? mDurationMultiplier : 1;
    }

    /**
     * @return the item outputs of the recipe, scaled by the achieved parallels
     * @throws IllegalStateException if called before {@link #build()} or without
     *                               {@link #enableOutputCalculation()}
     */
    public ItemStack[] getItemOutputs() {
        if (mBuilt && mCalculateOutputs) {
            return mItemOutputs;
        }
        throw new IllegalStateException(
            "Tried to get item outputs before building or without enabling calculation of outputs");
    }

    /**
     * @return the fluid outputs of the recipe, scaled by the achieved parallels
     * @throws IllegalStateException if called before {@link #build()} or without
     *                               {@link #enableOutputCalculation()}
     */
    public FluidStack[] getFluidOutputs() {
        if (mBuilt && mCalculateOutputs) {
            return mFluidOutputs;
        }
        throw new IllegalStateException(
            "Tried to get fluid outputs before building or without enabling calculation of outputs");
    }

    /**
     * Called by build(). Determines the parallels and everything else that needs to be done at
     * build time: optional void-protection caps, input consumption, batch-mode extras and scaled
     * outputs.
     */
    protected void determineParallel() {
        // Not enough EU for even a single craft: leave mCurrentParallel at 0.
        if (mRecipe.mEUt > mAvailableEUt) {
            return;
        }
        ItemStack[] tItemInputs;
        FluidStack[] tFluidInputs;
        boolean tMEOutputBus = false;
        boolean tMEOutputHatch = false;
        long tCurrentUsage = 0;
        // See if people want to consume their inputs with the Parallel Helper or not
        if (mConsume) {
            tItemInputs = mItemInputs;
            tFluidInputs = mFluidInputs;
        } else {
            // Work on defensive copies so the caller's stacks are left untouched by the recipe checks.
            if (mItemInputs == null) {
                tItemInputs = new ItemStack[] {};
            } else {
                tItemInputs = new ItemStack[mItemInputs.length];
                for (int i = 0; i < mItemInputs.length; i++) {
                    tItemInputs[i] = mItemInputs[i] == null ? null : mItemInputs[i].copy();
                }
            }

            if (mFluidInputs == null) {
                // BUGFIX: this used to assign to mFluidInputs, clobbering the caller's field and
                // leaving tFluidInputs null for every recipe check below.
                tFluidInputs = new FluidStack[] {};
            } else {
                tFluidInputs = new FluidStack[mFluidInputs.length];
                for (int i = 0; i < mFluidInputs.length; i++) {
                    tFluidInputs[i] = mFluidInputs[i] == null ? null : mFluidInputs[i].copy();
                }
            }
        }
        if (mBatchMode) {
            mMaxParallel *= mBatchModifier;
        }
        // Let's look at how many parallels we can get with void protection
        if (mVoidProtection) {
            for (GT_MetaTileEntity_Hatch tHatch : mMachineMeta.mOutputBusses) {
                if (tHatch instanceof GT_MetaTileEntity_Hatch_OutputBus_ME) {
                    tMEOutputBus = true;
                    break;
                }
            }

            for (GT_MetaTileEntity_Hatch tHatch : mMachineMeta.mOutputHatches) {
                if (tHatch instanceof GT_MetaTileEntity_Hatch_Output_ME) {
                    tMEOutputHatch = true;
                    break;
                }
            }
            // When an ME output bus/hatch is present the corresponding cap is skipped entirely.
            if (!tMEOutputBus) {
                mMaxParallel = Math.min(calculateMaxParallelsForBusses(), mMaxParallel);
            }

            if (!tMEOutputHatch) {
                mMaxParallel = Math.min(calculateMaxParallelsForHatches(), mMaxParallel);
            }
        }

        float tRecipeEUt = mRecipe.mEUt * mEUtModifier;
        // Consume inputs to determine normal parallel
        for (; mCurrentParallel < mMaxParallel / mBatchModifier
            && tCurrentUsage < (mAvailableEUt - tRecipeEUt); mCurrentParallel++) {
            if (mRecipe.isRecipeInputEqual(true, false, tFluidInputs, tItemInputs)) {
                tCurrentUsage += tRecipeEUt;
            } else {
                break;
            }
        }

        // If Batch Mode is enabled determine how many extra parallels we can get
        if (mBatchMode) {
            if (mCurrentParallel > 0) {
                int tExtraParallels = 0;
                while (tExtraParallels < mCurrentParallel * (mBatchModifier - 1)
                    && mRecipe.isRecipeInputEqual(false, false, tFluidInputs, tItemInputs)) {
                    mRecipe.isRecipeInputEqual(true, false, tFluidInputs, tItemInputs);
                    tExtraParallels++;
                }
                mDurationMultiplier = 1.0f + (float) tExtraParallels / mCurrentParallel;
                mCurrentParallel += tExtraParallels;
            } else {
                // BUGFIX: previously divided by zero here, producing a NaN duration multiplier
                // when no parallel succeeded.
                mDurationMultiplier = 1.0f;
            }
        }

        // If we want to calculate outputs we do it here
        if (mCalculateOutputs) {
            if (mRecipe.mOutputs != null) {
                mItemOutputs = new ItemStack[mRecipe.mOutputs.length];
                for (int i = 0; i < mRecipe.mOutputs.length; i++) {
                    // Output chance is rolled once per output slot, not once per parallel.
                    if (mRecipe.getOutputChance(i) >= XSTR.XSTR_INSTANCE.nextInt(10000)) {
                        if (mRecipe.getOutput(i) == null) {
                            mItemOutputs[i] = null;
                        } else {
                            ItemStack tItem = mRecipe.getOutput(i)
                                .copy();
                            tItem.stackSize *= mCurrentParallel;
                            mItemOutputs[i] = tItem;
                        }
                    }
                }
            }
            if (mRecipe.mFluidOutputs != null) {
                mFluidOutputs = new FluidStack[mRecipe.mFluidOutputs.length];
                for (int i = 0; i < mRecipe.mFluidOutputs.length; i++) {
                    if (mRecipe.getFluidOutput(i) == null) {
                        mFluidOutputs[i] = null;
                    } else {
                        FluidStack tFluid = mRecipe.getFluidOutput(i)
                            .copy();
                        tFluid.amount *= mCurrentParallel;
                        mFluidOutputs[i] = tFluid;
                    }
                }
            }
        }
    }

    /**
     * Calculates the max parallels for fluids if void protection is turned on.
     * <p>
     * Two-pass accounting: first, partially filled or restricted hatches contribute capacity to the
     * specific fluid they can accept; then each empty, unrestricted hatch is greedily assigned to
     * whichever output currently supports the fewest complete crafts (via a priority queue). The
     * answer is the minimum number of complete crafts supported across all fluid outputs.
     *
     * @return the number of complete parallel crafts the output hatches can hold; mMaxParallel if
     *         the recipe has no non-null fluid outputs; 0 if there is no metatile controller or
     *         fewer hatches than fluid outputs
     */
    private int calculateMaxParallelsForHatches() {
        // For now we are gonna ignore MuTEs existence as there are no recipes for them
        if (mMachineMeta != null && mMachineMeta.mOutputHatches.size() >= mRecipe.mFluidOutputs.length) {
            // A map to hold the items we will be 'inputting' into the output hatches. These fluidstacks are actually
            // the
            // recipe outputs.
            Map<FluidStack, Integer> tFluidOutputMap = new HashMap<>();

            // Map that keeps track of the number of parallel crafts we can accommodate for each fluid output.
            // In the pair, we keep track of number of full crafts plus mb of fluid in a partial craft, to avoid
            // issues with floating point math not being completely accurate when summing.
            Map<FluidStack, MutablePair<Integer, Integer>> tParallels = new HashMap<>();

            // Iterate over the outputs, calculating require stack spacing they will require.
            for (FluidStack aY : mRecipe.mFluidOutputs) {
                if (aY == null) {
                    continue;
                }
                // merge duplicate output entries into one total mb-per-craft figure
                tFluidOutputMap.merge(aY, aY.amount, Integer::sum);
                tParallels.put(aY, new MutablePair<>(0, 0));
            }

            if (tFluidOutputMap.isEmpty()) {
                // nothing to output, bail early
                return mMaxParallel;
            }

            // First pass: partially filled or restricted hatches.
            for (GT_MetaTileEntity_Hatch_Output tHatch : mMachineMeta.mOutputHatches) {
                int tSpaceLeft = tHatch.getCapacity() - tHatch.getFluidAmount();

                // check if hatch filled
                if (tSpaceLeft <= 0) continue;

                // empty, unrestricted hatches are counted in the open-slot pass further below
                if (tHatch.mMode == 0 && tHatch.getFluidAmount() == 0) continue;

                String tLockedFluidName = tHatch.getLockedFluidName();
                for (Entry<FluidStack, MutablePair<Integer, Integer>> entry : tParallels.entrySet()) {
                    FluidStack tFluidOutput = entry.getKey();
                    // steam can only go into steam-capable hatches; liquids into liquid-capable ones
                    if (GT_ModHandler.isSteam(tFluidOutput)) {
                        if (!tHatch.outputsSteam()) {
                            continue;
                        }
                    } else {
                        if (!tHatch.outputsLiquids()) {
                            continue;
                        }
                        // fluid-locked hatches only accept the fluid they are locked to
                        if (tHatch.isFluidLocked() && tLockedFluidName != null
                            && !tLockedFluidName.equals(
                                tFluidOutput.getFluid()
                                    .getName())) {
                            continue;
                        }
                    }
                    // this fluid is not prevented by restrictions on output hatch
                    if (tHatch.getFluidAmount() == 0 || GT_Utility.areFluidsEqual(tHatch.getFluid(), tFluidOutput)) {
                        MutablePair<Integer, Integer> tParallel = entry.getValue();
                        Integer tCraftSize = tFluidOutputMap.get(tFluidOutput);
                        // accumulate full crafts (left) and leftover mb toward the next craft (right)
                        tParallel.left += (tParallel.right + tSpaceLeft) / tCraftSize;
                        tParallel.right = (tParallel.right + tSpaceLeft) % tCraftSize;
                    }
                }
            }
            // now that all partial/restricted hatches have been counted, create a priority queue for our outputs
            // the lowest priority fluid is the number of complete parallel crafts we can support
            PriorityQueue<MutableTriple<Integer, Integer, FluidStack>> aParallelQueue = new PriorityQueue<>(
                Comparator.comparing(MutableTriple::getLeft));
            for (Entry<FluidStack, MutablePair<Integer, Integer>> entry : tParallels.entrySet()) {
                aParallelQueue.add(new MutableTriple<>(entry.getValue().left, entry.getValue().right, entry.getKey()));
            }
            // add extra parallels for open slots as well
            for (GT_MetaTileEntity_Hatch_Output tHatch : mMachineMeta.mOutputHatches) {
                // partially filled or restricted hatch. done in last pass
                if (tHatch.getFluidAmount() > 0 || tHatch.mMode != 0) continue;

                // assign this empty hatch's full capacity to the currently worst-off fluid output
                MutableTriple<Integer, Integer, FluidStack> tParallel = aParallelQueue.poll();
                assert tParallel != null; // will always be true, specifying assert here to avoid IDE/compiler warnings
                Integer tCraftSize = tFluidOutputMap.get(tParallel.right);
                int tSpaceLeft = tHatch.getCapacity();
                tParallel.left += (tParallel.middle + tSpaceLeft) / tCraftSize;
                tParallel.middle = (tParallel.middle + tSpaceLeft) % tCraftSize;
                aParallelQueue.add(tParallel);
            }
            // head of the queue is the bottleneck output: its full-craft count bounds the parallels
            return aParallelQueue.element().left;
        }
        return 0;
    }

    /**
     * Calculates the max parallels one can do with items if void protection is on.
     * <p>
     * Mirrors {@link #calculateMaxParallelsForHatches()}: partially filled bus stacks contribute
     * space to their matching output first, then each free slot is greedily assigned (via a
     * priority queue) to whichever output currently supports the fewest complete crafts.
     *
     * @return the number of complete parallel crafts the output busses can hold; mMaxParallel if
     *         the recipe has no item outputs; 0 if there is no metatile controller
     */
    private int calculateMaxParallelsForBusses() {
        // Same thing we are gonna ignore MuTEs existence for now. should be in theory the same later

        // A map to hold the items we will be 'inputting' into the output buses. These itemstacks are actually the
        // recipe outputs.
        Map<ItemStack, Integer> tItemOutputMap = new ItemStackMap<>();

        // Map that keeps track of the number of parallel crafts we can accommodate for each item output.
        // In the pair, we keep track of number of full crafts plus number of items in a partial craft, to avoid
        // issues with floating point math not being completely accurate when summing.
        Map<ItemStack, MutablePair<Integer, Integer>> tParallels = new ItemStackMap<>();
        int tSlotsFree = 0;
        for (ItemStack tItem : mRecipe.mOutputs) {
            // merge duplicate output entries into one items-per-craft total
            tItemOutputMap.merge(tItem, tItem.stackSize, Integer::sum);
            tParallels.put(tItem, new MutablePair<>(0, 0));
        }

        if (tItemOutputMap.isEmpty()) {
            // nothing to output, bail early
            return mMaxParallel;
        }

        if (mRecipe.mOutputs.length > 0 && mMachineMeta != null) {
            for (final GT_MetaTileEntity_Hatch tBus : mMachineMeta.mOutputBusses) {
                if (!GT_MetaTileEntity_MultiBlockBase.isValidMetaTileEntity(tBus)) {
                    continue;
                }
                // NOTE(review): slot count comes from the base tile entity while stacks are read
                // from the hatch itself — presumably they expose the same inventory; verify.
                final IInventory tBusInv = tBus.getBaseMetaTileEntity();
                for (int i = 0; i < tBusInv.getSizeInventory(); i++) {
                    ItemStack tBusStack = tBus.getStackInSlot(i);
                    if (tBusStack == null) {
                        // empty slot: counted later in the greedy free-slot pass
                        tSlotsFree++;
                    } else {
                        // get the real stack size
                        // we ignore the bus inventory stack limit here as no one set it to anything other than 64
                        int tMaxBusStackSize = tBusStack.getMaxStackSize();
                        if (tBusStack.stackSize >= tMaxBusStackSize)
                            // this bus stack is full. no checking
                            continue;
                        int tSpaceLeft = tMaxBusStackSize - tBusStack.stackSize;
                        Integer tCraftSize = tItemOutputMap.get(tBusStack);
                        if (tCraftSize == null) {
                            // we don't have a matching stack to output, ignore this bus stack
                            continue;
                        }
                        // accumulate full crafts (left) and leftover items toward the next craft (right)
                        MutablePair<Integer, Integer> tParallel = tParallels.get(tBusStack);
                        tParallel.left += (tParallel.right + tSpaceLeft) / tCraftSize;
                        tParallel.right = (tParallel.right + tSpaceLeft) % tCraftSize;
                    }
                }
            }
            // now that all partial stacks have been counted, create a priority queue for our outputs
            // the lowest priority item is the number of complete parallel crafts we can support
            PriorityQueue<MutableTriple<Integer, Integer, ItemStack>> aParallelQueue = new PriorityQueue<>(
                Comparator.comparing(MutableTriple::getLeft));
            for (Entry<ItemStack, MutablePair<Integer, Integer>> entry : tParallels.entrySet()) {
                aParallelQueue.add(new MutableTriple<>(entry.getValue().left, entry.getValue().right, entry.getKey()));
            }

            // hand each free slot to the currently worst-off output, one max-size stack at a time
            while (tSlotsFree > 0) {
                MutableTriple<Integer, Integer, ItemStack> tParallel = aParallelQueue.poll();
                assert tParallel != null; // will always be true, specifying assert here to avoid IDE/compiler warnings
                Integer tCraftSize = tItemOutputMap.get(tParallel.right);
                int tStackSize = tParallel.right.getMaxStackSize();
                tParallel.left += (tParallel.middle + tStackSize) / tCraftSize;
                tParallel.middle = (tParallel.middle + tStackSize) % tCraftSize;
                aParallelQueue.add(tParallel);
                --tSlotsFree;
            }

            // head of the queue is the bottleneck output: its full-craft count bounds the parallels
            return aParallelQueue.element().left;
        }
        return 0;
    }
}