@@ -29,6 +29,7 @@
 import io.reactivex.internal.schedulers.ImmediateThinScheduler;
 import io.reactivex.internal.subscribers.*;
 import io.reactivex.internal.util.*;
+import io.reactivex.parallel.ParallelFlowable;
 import io.reactivex.plugins.RxJavaPlugins;
 import io.reactivex.schedulers.*;
 import io.reactivex.subscribers.*;
@@ -10363,6 +10364,100 @@ public final Flowable<T> onTerminateDetach() {
         return RxJavaPlugins.onAssembly(new FlowableDetach<T>(this));
     }
 
+    /**
+     * Parallelizes the flow by creating multiple 'rails' (equal to the number of CPUs)
+     * and dispatching the upstream items to them in a round-robin fashion.
+     * <p>
+     * Note that the rails don't execute in parallel on their own and one needs to
+     * apply {@link ParallelFlowable#runOn(Scheduler)} to specify the Scheduler where
+     * each rail will execute.
+     * <p>
+     * To merge the parallel 'rails' back into a single sequence, use {@link ParallelFlowable#sequential()}.
+     * <p>
+     * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/flowable.parallel.png" alt="">
+     * <dl>
+     *  <dt><b>Backpressure:</b></dt>
+     *  <dd>The operator requires the upstream to honor backpressure and each 'rail' honors backpressure
+     *  as well.</dd>
+     *  <dt><b>Scheduler:</b></dt>
+     *  <dd>{@code parallel} does not operate by default on a particular {@link Scheduler}.</dd>
+     * </dl>
+     * @return the new ParallelFlowable instance
+     * @since 2.0.5 - experimental
+     */
+    @BackpressureSupport(BackpressureKind.FULL)
+    @SchedulerSupport(SchedulerSupport.NONE)
+    @CheckReturnValue
+    @Experimental
+    public final ParallelFlowable<T> parallel() {
+        return ParallelFlowable.from(this);
+    }
+
+    /**
+     * Parallelizes the flow by creating the specified number of 'rails'
+     * and dispatching the upstream items to them in a round-robin fashion.
+     * <p>
+     * Note that the rails don't execute in parallel on their own and one needs to
+     * apply {@link ParallelFlowable#runOn(Scheduler)} to specify the Scheduler where
+     * each rail will execute.
+     * <p>
+     * To merge the parallel 'rails' back into a single sequence, use {@link ParallelFlowable#sequential()}.
+     * <p>
+     * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/flowable.parallel.png" alt="">
+     * <dl>
+     *  <dt><b>Backpressure:</b></dt>
+     *  <dd>The operator requires the upstream to honor backpressure and each 'rail' honors backpressure
+     *  as well.</dd>
+     *  <dt><b>Scheduler:</b></dt>
+     *  <dd>{@code parallel} does not operate by default on a particular {@link Scheduler}.</dd>
+     * </dl>
+     * @param parallelism the number of 'rails' to use
+     * @return the new ParallelFlowable instance
+     * @since 2.0.5 - experimental
+     */
+    @BackpressureSupport(BackpressureKind.FULL)
+    @SchedulerSupport(SchedulerSupport.NONE)
+    @CheckReturnValue
+    @Experimental
+    public final ParallelFlowable<T> parallel(int parallelism) {
+        ObjectHelper.verifyPositive(parallelism, "parallelism");
+        return ParallelFlowable.from(this, parallelism);
+    }
+
+    /**
+     * Parallelizes the flow by creating the specified number of 'rails',
+     * dispatching the upstream items to them in a round-robin fashion and
+     * using the given per-'rail' prefetch amount.
+     * <p>
+     * Note that the rails don't execute in parallel on their own and one needs to
+     * apply {@link ParallelFlowable#runOn(Scheduler)} to specify the Scheduler where
+     * each rail will execute.
+     * <p>
+     * To merge the parallel 'rails' back into a single sequence, use {@link ParallelFlowable#sequential()}.
+     * <p>
+     * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/flowable.parallel.png" alt="">
+     * <dl>
+     *  <dt><b>Backpressure:</b></dt>
+     *  <dd>The operator requires the upstream to honor backpressure and each 'rail' honors backpressure
+     *  as well.</dd>
+     *  <dt><b>Scheduler:</b></dt>
+     *  <dd>{@code parallel} does not operate by default on a particular {@link Scheduler}.</dd>
+     * </dl>
+     * @param parallelism the number of 'rails' to use
+     * @param prefetch the number of items each 'rail' should prefetch
+     * @return the new ParallelFlowable instance
+     * @since 2.0.5 - experimental
+     */
+    @BackpressureSupport(BackpressureKind.FULL)
+    @SchedulerSupport(SchedulerSupport.NONE)
+    @CheckReturnValue
+    @Experimental
+    public final ParallelFlowable<T> parallel(int parallelism, int prefetch) {
+        ObjectHelper.verifyPositive(parallelism, "parallelism");
+        ObjectHelper.verifyPositive(prefetch, "prefetch");
+        return ParallelFlowable.from(this, parallelism, prefetch);
+    }
+
     /**
      * Returns a {@link ConnectableFlowable}, which is a variety of Publisher that waits until its
      * {@link ConnectableFlowable#connect connect} method is called before it begins emitting items to those
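
Usage sketch (not part of the diff above): as the Javadoc notes, the new parallel() overloads are meant to be combined with ParallelFlowable.runOn(Scheduler) and ParallelFlowable.sequential(). The snippet below is a minimal illustration assuming RxJava 2.x (2.0.5 or later, where parallel() is still @Experimental); the parallelism of 2 and prefetch of 16 are arbitrary example values, not recommendations.

    import io.reactivex.Flowable;
    import io.reactivex.schedulers.Schedulers;

    public class ParallelUsageExample {
        public static void main(String[] args) {
            // Default overload: one 'rail' per available CPU; runOn() is what makes
            // the rails actually execute in parallel, here on the computation scheduler.
            Flowable.range(1, 100)
                    .parallel()
                    .runOn(Schedulers.computation())
                    .map(v -> v * v)          // runs concurrently across the rails
                    .sequential()             // merges the rails; item order is not guaranteed
                    .blockingSubscribe(System.out::println);

            // Explicit parallelism and per-rail prefetch (2 and 16 are arbitrary here).
            Flowable.range(1, 100)
                    .parallel(2, 16)
                    .runOn(Schedulers.computation())
                    .map(v -> v + 1)
                    .sequential()
                    .blockingSubscribe(System.out::println);
        }
    }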