-- Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
--
-- Window Functions Testing
-- https://github.com/postgres/postgres/blob/REL_12_STABLE/src/test/regress/sql/window.sql#L913-L1278

-- Test window operator with codegen on and off.
--CONFIG_DIM1 spark.sql.codegen.wholeStage=true
--CONFIG_DIM1 spark.sql.codegen.wholeStage=false,spark.sql.codegen.factoryMode=CODEGEN_ONLY
--CONFIG_DIM1 spark.sql.codegen.wholeStage=false,spark.sql.codegen.factoryMode=NO_CODEGEN
-- Spark doesn't handle UDFs in SQL
-- test user-defined window function with named args and default args
-- CREATE FUNCTION nth_value_def(val anyelement, n integer = 1) RETURNS anyelement
--   LANGUAGE internal WINDOW IMMUTABLE STRICT AS 'window_nth_value';

-- Spark doesn't handle UDFs in SQL
-- SELECT nth_value_def(n := 2, val := ten) OVER (PARTITION BY four), ten, four
--   FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s;

-- Spark doesn't handle UDFs in SQL
-- SELECT nth_value_def(ten) OVER (PARTITION BY four), ten, four
--   FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s;

--
-- Test the basic moving-aggregate machinery
--

-- create aggregates that record the series of transform calls (these are
-- intentionally not true inverses)

-- Spark doesn't handle UDFs in SQL
-- CREATE FUNCTION logging_sfunc_nonstrict(text, anyelement) RETURNS text AS
-- $$ SELECT COALESCE($1, '') || '*' || quote_nullable($2) $$
-- LANGUAGE SQL IMMUTABLE;

-- Spark doesn't handle UDFs in SQL
-- CREATE FUNCTION logging_msfunc_nonstrict(text, anyelement) RETURNS text AS
-- $$ SELECT COALESCE($1, '') || '+' || quote_nullable($2) $$
-- LANGUAGE SQL IMMUTABLE;

-- Spark doesn't handle UDFs in SQL
-- CREATE FUNCTION logging_minvfunc_nonstrict(text, anyelement) RETURNS text AS
-- $$ SELECT $1 || '-' || quote_nullable($2) $$
-- LANGUAGE SQL IMMUTABLE;

-- Spark doesn't handle UDFs in SQL
-- CREATE AGGREGATE logging_agg_nonstrict (anyelement)
-- (
--      stype = text,
--      sfunc = logging_sfunc_nonstrict,
--      mstype = text,
--      msfunc = logging_msfunc_nonstrict,
--      minvfunc = logging_minvfunc_nonstrict
-- );

-- Spark doesn't handle UDFs in SQL
-- CREATE AGGREGATE logging_agg_nonstrict_initcond (anyelement)
-- (
--      stype = text,
--      sfunc = logging_sfunc_nonstrict,
--      mstype = text,
--      msfunc = logging_msfunc_nonstrict,
--      minvfunc = logging_minvfunc_nonstrict,
--      initcond = 'I',
--      minitcond = 'MI'
-- );

-- Spark doesn't handle UDFs in SQL
-- CREATE FUNCTION logging_sfunc_strict(text, anyelement) RETURNS text AS
-- $$ SELECT $1 || '*' || quote_nullable($2) $$
-- LANGUAGE SQL STRICT IMMUTABLE;

-- Spark doesn't handle UDFs in SQL
-- CREATE FUNCTION logging_msfunc_strict(text, anyelement) RETURNS text AS
-- $$ SELECT $1 || '+' || quote_nullable($2) $$
-- LANGUAGE SQL STRICT IMMUTABLE;

-- Spark doesn't handle UDFs in SQL
-- CREATE FUNCTION logging_minvfunc_strict(text, anyelement) RETURNS text AS
-- $$ SELECT $1 || '-' || quote_nullable($2) $$
-- LANGUAGE SQL STRICT IMMUTABLE;

-- Spark doesn't handle UDFs in SQL
-- CREATE AGGREGATE logging_agg_strict (text)
-- (
--      stype = text,
--      sfunc = logging_sfunc_strict,
--      mstype = text,
--      msfunc = logging_msfunc_strict,
--      minvfunc = logging_minvfunc_strict
-- );

-- Spark doesn't handle UDFs in SQL
-- CREATE AGGREGATE logging_agg_strict_initcond (anyelement)
-- (
--      stype = text,
--      sfunc = logging_sfunc_strict,
--      mstype = text,
--      msfunc = logging_msfunc_strict,
--      minvfunc = logging_minvfunc_strict,
--      initcond = 'I',
--      minitcond = 'MI'
-- );

-- Spark doesn't handle UDFs in SQL
-- test strict and non-strict cases
-- SELECT
--      p::text || ',' || i::text || ':' || COALESCE(v::text, 'NULL') AS row,
--      logging_agg_nonstrict(v) over wnd as nstrict,
--      logging_agg_nonstrict_initcond(v) over wnd as nstrict_init,
--      logging_agg_strict(v::text) over wnd as strict,
--      logging_agg_strict_initcond(v) over wnd as strict_init
-- FROM (VALUES
--      (1, 1, NULL),
--      (1, 2, 'a'),
--      (1, 3, 'b'),
--      (1, 4, NULL),
--      (1, 5, NULL),
--      (1, 6, 'c'),
--      (2, 1, NULL),
--      (2, 2, 'x'),
--      (3, 1, 'z')
-- ) AS t(p, i, v)
-- WINDOW wnd AS (PARTITION BY P ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
-- ORDER BY p, i;

-- Spark doesn't handle UDFs in SQL
-- and again, but with filter
-- SELECT
--      p::text || ',' || i::text || ':' ||
--              CASE WHEN f THEN COALESCE(v::text, 'NULL') ELSE '-' END as row,
--      logging_agg_nonstrict(v) filter(where f) over wnd as nstrict_filt,
--      logging_agg_nonstrict_initcond(v) filter(where f) over wnd as nstrict_init_filt,
--      logging_agg_strict(v::text) filter(where f) over wnd as strict_filt,
--      logging_agg_strict_initcond(v) filter(where f) over wnd as strict_init_filt
-- FROM (VALUES
--      (1, 1, true,  NULL),
--      (1, 2, false, 'a'),
--      (1, 3, true,  'b'),
--      (1, 4, false, NULL),
--      (1, 5, false, NULL),
--      (1, 6, false, 'c'),
--      (2, 1, false, NULL),
--      (2, 2, true,  'x'),
--      (3, 1, true,  'z')
-- ) AS t(p, i, f, v)
-- WINDOW wnd AS (PARTITION BY p ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
-- ORDER BY p, i;

-- Spark doesn't handle UDFs in SQL
-- test that volatile arguments disable moving-aggregate mode
-- SELECT
--      i::text || ':' || COALESCE(v::text, 'NULL') as row,
--      logging_agg_strict(v::text)
--              over wnd as inverse,
--      logging_agg_strict(v::text || CASE WHEN random() < 0 then '?' ELSE '' END)
--              over wnd as noinverse
-- FROM (VALUES
--      (1, 'a'),
--      (2, 'b'),
--      (3, 'c')
-- ) AS t(i, v)
-- WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
-- ORDER BY i;

-- Spark doesn't handle UDFs in SQL
-- SELECT
--      i::text || ':' || COALESCE(v::text, 'NULL') as row,
--      logging_agg_strict(v::text) filter(where true)
--              over wnd as inverse,
--      logging_agg_strict(v::text) filter(where random() >= 0)
--              over wnd as noinverse
-- FROM (VALUES
--      (1, 'a'),
--      (2, 'b'),
--      (3, 'c')
-- ) AS t(i, v)
-- WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
-- ORDER BY i;

-- Spark doesn't handle UDFs in SQL
-- test that non-overlapping windows don't use inverse transitions
-- SELECT
--      logging_agg_strict(v::text) OVER wnd
-- FROM (VALUES
--      (1, 'a'),
--      (2, 'b'),
--      (3, 'c')
-- ) AS t(i, v)
-- WINDOW wnd AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW)
-- ORDER BY i;

-- Spark doesn't handle UDFs in SQL
-- test that returning NULL from the inverse transition functions
-- restarts the aggregation from scratch. The second aggregate is supposed
-- to test cases where only some aggregates restart, the third one checks
-- that one aggregate restarting doesn't cause others to restart.

-- Spark doesn't handle UDFs in SQL
-- CREATE FUNCTION sum_int_randrestart_minvfunc(int4, int4) RETURNS int4 AS
-- $$ SELECT CASE WHEN random() < 0.2 THEN NULL ELSE $1 - $2 END $$
-- LANGUAGE SQL STRICT;

-- Spark doesn't handle UDFs in SQL
-- CREATE AGGREGATE sum_int_randomrestart (int4)
-- (
--      stype = int4,
--      sfunc = int4pl,
--      mstype = int4,
--      msfunc = int4pl,
--      minvfunc = sum_int_randrestart_minvfunc
-- );

-- Spark doesn't handle UDFs in SQL
-- WITH
-- vs AS (
--      SELECT i, (random() * 100)::int4 AS v
--      FROM generate_series(1, 100) AS i
-- ),
-- sum_following AS (
--      SELECT i, SUM(v) OVER
--              (ORDER BY i DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS s
--      FROM vs
-- )
-- SELECT DISTINCT
--      sum_following.s = sum_int_randomrestart(v) OVER fwd AS eq1,
--      -sum_following.s = sum_int_randomrestart(-v) OVER fwd AS eq2,
--      100*3+(vs.i-1)*3 = length(logging_agg_nonstrict(''::text) OVER fwd) AS eq3
-- FROM vs
-- JOIN sum_following ON sum_following.i = vs.i
-- WINDOW fwd AS (
--      ORDER BY vs.i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING
-- );

--
-- Test various built-in aggregates that have moving-aggregate support
--

-- test inverse transition functions handle NULLs properly
-- NOTE(review): the next three queries are textually identical after porting;
-- upstream PostgreSQL runs them against different integer widths
-- (int2/int4/int8) -- presumably the casts were dropped because the VALUES
-- literals are plain ints in Spark. Confirm against upstream window.sql.
SELECT i,AVG(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);

SELECT i,AVG(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);

SELECT i,AVG(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);

-- same test over decimal values
SELECT i,AVG(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,1.5),(2,2.5),(3,NULL),(4,NULL)) t(i,v);

-- [SPARK-28602] Spark does not recognize 'interval' type as 'numeric'
-- SELECT i,AVG(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
--   FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v);

-- SUM with an inverse-transition-style frame (running sum from each row to
-- the partition end); the trailing NULLs exercise NULL handling in the
-- moving-aggregate path.
-- NOTE(review): the three identical queries mirror upstream PostgreSQL's
-- int2/int4/int8 variants -- confirm against upstream window.sql.
SELECT i,SUM(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);

SELECT i,SUM(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);

SELECT i,SUM(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);

-- The cast syntax is present in PgSQL for legacy reasons and Spark will not recognize a money field
-- SELECT i,SUM(v::money) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
--   FROM (VALUES(1,'1.10'),(2,'2.20'),(3,NULL),(4,NULL)) t(i,v);

-- [SPARK-28602] Spark does not recognize 'interval' type as 'numeric'
-- SELECT i,SUM(cast(v as interval)) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
--   FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v);

-- SUM over decimal values
SELECT i,SUM(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,1.1),(2,2.2),(3,NULL),(4,NULL)) t(i,v);

-- mixed decimal/integer input
SELECT SUM(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,1.01),(2,2),(3,3)) v(i,n);

-- COUNT(v) skips NULLs; COUNT(*) counts all rows in the frame
SELECT i,COUNT(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);

SELECT i,COUNT(*) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);

-- variance family over a shrinking suffix frame
-- NOTE(review): each query is repeated four times, matching upstream
-- PostgreSQL's per-numeric-type variants (int2/int4/int8/numeric) --
-- confirm against upstream window.sql.
SELECT VAR_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT VAR_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT VAR_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT VAR_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT VAR_SAMP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT VAR_SAMP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT VAR_SAMP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT VAR_SAMP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT VARIANCE(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT VARIANCE(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT VARIANCE(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT VARIANCE(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

-- stddev family over a shrinking suffix frame; leading NULL row exercises
-- NULL handling in the moving-aggregate path
SELECT STDDEV_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);

SELECT STDDEV_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);

SELECT STDDEV_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);

SELECT STDDEV_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);

-- For the following queries Spark result differs from PgSQL:
-- Spark handles division by zero as 'NaN' instead of 'NULL', which is the PgSQL behaviour
SELECT STDDEV_SAMP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);

SELECT STDDEV_SAMP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);

SELECT STDDEV_SAMP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);

SELECT STDDEV_SAMP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);

SELECT STDDEV(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT STDDEV(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT STDDEV(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

SELECT STDDEV(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
  FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);

-- test that inverse transition functions work with various frame options
SELECT i,SUM(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW)
  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);

SELECT i,SUM(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING)
  FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);

SELECT i,SUM(v) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
  FROM (VALUES(1,1),(2,2),(3,3),(4,4)) t(i,v);

-- [SPARK-29638] Spark handles 'NaN' as 0 in sums
-- ensure aggregate over numeric properly recovers from NaN values
SELECT a, b,
       SUM(b) OVER(ORDER BY A ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
FROM (VALUES(1,1),(2,2),(3,(cast('nan' as int))),(4,3),(5,4)) t(a,b);

-- It might be tempting for someone to add an inverse trans function for
-- float and double precision. This should not be done as it can give incorrect
-- results. This test should fail if anyone ever does this without thinking too
-- hard about it.
-- [SPARK-28516] adds `to_char`
-- SELECT to_char(SUM(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING),'999999999999999999999D9')
--   FROM (VALUES(1,1e20),(2,1)) n(i,n);

-- [SPARK-27880] Implement boolean aggregates(BOOL_AND, BOOL_OR and EVERY)
-- SELECT i, b, bool_and(b) OVER w, bool_or(b) OVER w
--   FROM (VALUES (1,true), (2,true), (3,false), (4,false), (5,true)) v(i,b)
--   WINDOW w AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING);

-- Tests for problems with failure to walk or mutate expressions
-- within window frame clauses.

-- [SPARK-27974] Add built-in Aggregate Function: array_agg
-- test walker (fails with collation error if expressions are not walked)
-- SELECT array_agg(i) OVER w
--   FROM range(1,6) i
-- WINDOW w AS (ORDER BY i ROWS BETWEEN (('foo' < 'foobar')::integer) PRECEDING AND CURRENT ROW);

-- Spark doesn't handle UDFs in SQL
-- test mutator (fails when inlined if expressions are not mutated)
-- CREATE FUNCTION pg_temp.f(group_size BIGINT) RETURNS SETOF integer[]
-- AS $$
--     SELECT array_agg(s) OVER w
--       FROM generate_series(1,5) s
--     WINDOW w AS (ORDER BY s ROWS BETWEEN CURRENT ROW AND GROUP_SIZE FOLLOWING)
-- $$ LANGUAGE SQL STABLE;