1 Commit
main ... cvpr26

Author SHA1 Message Date
Tobias Christian Nauen  e7c0b531d6  cvpr submission  2026-02-24 12:01:26 +01:00
59 changed files with 7238 additions and 4939 deletions

File diff suppressed because one or more lines are too long

21
README.md Normal file

@@ -0,0 +1,21 @@
# CVPR/ICCV/3DV Official LaTeX template
**Note:** the Microsoft Word version of the template is in the branch [`main-msword`](https://github.com/cvpr-org/author-kit/tree/main-msword).
### History (in reverse chronological order)
- updated for CVPR 2026 by [Vladimir Pavlovic](mailto:vladimir@rutgers.edu)
- added styles for `subsubsection` and fixed the wrong PDF bookmarks by [Di Fang](https://github.com/fang-d)
- modernized for CVPR 2025 by [Christian Richardt](https://richardt.name/)
- fixed page centering for CVPR 2025 by [Stefan Roth](mailto:stefan.roth@NOSPAMtu-darmstadt.de)
- inline enumerations and `cvprblue` links for CVPR 2025 by [Ioannis Gkioulekas](https://www.cs.cmu.edu/~igkioule/)
- added automated LaTeX build testing for CVPR 2025 by [Ahan Shabanov](https://ahanio.github.io)
- references in `cvprblue` for CVPR 2024 by [Klaus Greff](https://github.com/Qwlouse)
- added natbib for CVPR 2024 by [Christian Richardt](https://richardt.name/)
- replaced buggy (review-mode) line numbering for 3DV 2024 by [Adín Ramírez Rivera](https://openreview.net/profile?id=~Ad%C3%ADn_Ram%C3%ADrez_Rivera1)
- logic for inline supplementary for 3DV 2024 by [Andrea Tagliasacchi](https://taiya.github.io)
- modernized for CVPR 2022 by [Stefan Roth](mailto:stefan.roth@NOSPAMtu-darmstadt.de)
- created cvpr.sty file to unify review/rebuttal/final versions by [Ming-Ming Cheng](https://github.com/MCG-NKU/CVPR_Template)
- developed CVPR 2005 template by [Paolo Ienne](mailto:Paolo.Ienne@di.epfl.ch) and [Andrew Fitzgibbon](mailto:awf@acm.org)

508
cvpr.sty Normal file

@@ -0,0 +1,508 @@
% ---------------------------------------------------------------
%
% No guarantee is given that the format corresponds perfectly to
% IEEE 8.5" x 11" Proceedings, but most features should be ok.
%
% ---------------------------------------------------------------
% with LaTeX2e:
% =============
%
% use as
% \documentclass[times,10pt,twocolumn]{article}
% \usepackage[options]{cvpr}
% \usepackage{times}
%
% "options" should be replaced by
% * "review" for submitting a paper for review,
% * "final" for the camera ready, and
% * "rebuttal" for the author rebuttal.
%
% specify references as
% {\small
% \bibliographystyle{ieee}
% \bibliography{...your files...}
% }
% ---------------------------------------------------------------
\NeedsTeXFormat{LaTeX2e}[1999/12/01]
\ProvidesPackage{cvpr}[2026 LaTeX class for IEEE CVPR]
\RequirePackage{times} % Load the Times font
\RequirePackage{xspace}
\RequirePackage[dvipsnames]{xcolor}
\RequirePackage{graphicx}
\RequirePackage{amsmath}
\RequirePackage{amssymb}
\RequirePackage{booktabs}
\RequirePackage[numbers,sort&compress]{natbib}
\setlength{\bibsep}{1pt plus 1pt minus 1pt}
\RequirePackage{silence} % Suppress unwanted warnings
\hbadness=10000 \vbadness=10000 \vfuzz=30pt \hfuzz=30pt
\WarningFilter{latexfont}{Font shape declaration}
\WarningFilter{latex}{Font shape}
\WarningFilter[rebuttal]{latex}{No \author given}
\RequirePackage{etoolbox}
% Use modern caption package to allow for sub-figures etc.
% Reproduces the original CVPR/ICCV style as closely as possible.
\RequirePackage[format=plain,labelformat=simple,labelsep=period,font=small,compatibility=false]{caption}
\RequirePackage[font=footnotesize,skip=3pt,subrefformat=parens]{subcaption}
\newtoggle{cvprfinal} % Camera-ready version
\newtoggle{cvprrebuttal} % Rebuttal
\newtoggle{cvprpagenumbers} % Force page numbers (in camera ready)
\toggletrue{cvprfinal}
\togglefalse{cvprrebuttal}
\togglefalse{cvprpagenumbers}
\DeclareOption{review}{\togglefalse{cvprfinal}\toggletrue{cvprpagenumbers}}
\DeclareOption{rebuttal}{\togglefalse{cvprfinal}\toggletrue{cvprrebuttal}}
\DeclareOption{pagenumbers}{\toggletrue{cvprpagenumbers}}
\DeclareOption*{\PackageWarning{cvpr}{Unknown option `\CurrentOption'}}
\ProcessOptions\relax
% Don't warn about missing author for rebuttal
\iftoggle{cvprrebuttal}{%
\ActivateWarningFilters[rebuttal]
}{}
% Breaking lines for URLs in the bib
\RequirePackage[hyphens]{url}
\Urlmuskip=0mu plus 1mu\relax
% ---------------------------------------------------------------
% Inlined version of the obsolete "everyshi-2001-05-15" package.
\newcommand{\@EveryShipout@Hook}{}
\newcommand{\@EveryShipout@AtNextHook}{}
\newcommand*{\EveryShipout}[1]
{\g@addto@macro\@EveryShipout@Hook{#1}}
\newcommand*{\AtNextShipout}[1]
{\g@addto@macro\@EveryShipout@AtNextHook{#1}}
\newcommand{\@EveryShipout@Shipout}{%
\afterassignment\@EveryShipout@Test
\global\setbox\@cclv= %
}
\newcommand{\@EveryShipout@Test}{%
\ifvoid\@cclv\relax
\aftergroup\@EveryShipout@Output
\else
\@EveryShipout@Output
\fi%
}
\newcommand{\@EveryShipout@Output}{%
\@EveryShipout@Hook%
\@EveryShipout@AtNextHook%
\gdef\@EveryShipout@AtNextHook{}%
\@EveryShipout@Org@Shipout\box\@cclv%
}
\newcommand{\@EveryShipout@Org@Shipout}{}
\newcommand*{\@EveryShipout@Init}{%
\message{ABD: EveryShipout initializing macros}%
\let\@EveryShipout@Org@Shipout\shipout
\let\shipout\@EveryShipout@Shipout
}
\AtBeginDocument{\@EveryShipout@Init}
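% Usage sketch: \EveryShipout{<code>} queues <code> to run at every page
% shipout; this style uses it below to stamp the review overlay (paper ID
% and confidentiality banner) onto each page.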
% ---------------------------------------------------------------
% ---------------------------------------------------------------
% Inlined simplified version of the "eso-pic" package.
\newcommand\LenToUnit[1]{#1\@gobble}
\newcommand\AtPageUpperLeft[1]{%
\begingroup
\@tempdima=0pt\relax\@tempdimb=\ESO@yoffsetI\relax
\put(\LenToUnit{\@tempdima},\LenToUnit{\@tempdimb}){#1}%
\endgroup
}
\newcommand\AtPageLowerLeft[1]{\AtPageUpperLeft{%
\put(0,\LenToUnit{-\paperheight}){#1}}}
\newcommand\AtPageCenter[1]{\AtPageUpperLeft{%
\put(\LenToUnit{.5\paperwidth},\LenToUnit{-.5\paperheight}){#1}}%
}
\newcommand\AtTextUpperLeft[1]{%
\begingroup
\setlength\@tempdima{1in}%
\ifodd\c@page%
\advance\@tempdima\oddsidemargin%
\else%
\advance\@tempdima\evensidemargin%
\fi%
\@tempdimb=\ESO@yoffsetI\relax\advance\@tempdimb-1in\relax%
\advance\@tempdimb-\topmargin%
\advance\@tempdimb-\headheight\advance\@tempdimb-\headsep%
\put(\LenToUnit{\@tempdima},\LenToUnit{\@tempdimb}){#1}%
\endgroup
}
\newcommand\AtTextLowerLeft[1]{\AtTextUpperLeft{%
\put(0,\LenToUnit{-\textheight}){#1}}}
\newcommand\AtTextCenter[1]{\AtTextUpperLeft{%
\put(\LenToUnit{.5\textwidth},\LenToUnit{-.5\textheight}){#1}}}
\newcommand{\ESO@HookI}{} \newcommand{\ESO@HookII}{}
\newcommand{\ESO@HookIII}{}
\newcommand{\AddToShipoutPicture}{%
\@ifstar{\g@addto@macro\ESO@HookII}{\g@addto@macro\ESO@HookI}}
\newcommand{\ClearShipoutPicture}{\global\let\ESO@HookI\@empty}
\newcommand\ESO@isMEMOIR[1]{}
\@ifclassloaded{memoir}{\renewcommand\ESO@isMEMOIR[1]{#1}}{}
\newcommand{\@ShipoutPicture}{%
\bgroup
\@tempswafalse%
\ifx\ESO@HookI\@empty\else\@tempswatrue\fi%
\ifx\ESO@HookII\@empty\else\@tempswatrue\fi%
\ifx\ESO@HookIII\@empty\else\@tempswatrue\fi%
\if@tempswa%
\@tempdima=1in\@tempdimb=-\@tempdima%
\advance\@tempdimb\ESO@yoffsetI%
\ESO@isMEMOIR{%
\advance\@tempdima\trimedge%
\advance\@tempdima\paperwidth%
\advance\@tempdima-\stockwidth%
\if@twoside\ifodd\c@page\else%
\advance\@tempdima-2\trimedge%
\advance\@tempdima-\paperwidth%
\advance\@tempdima\stockwidth%
\fi\fi%
\advance\@tempdimb\trimtop}%
\unitlength=1pt%
\global\setbox\@cclv\vbox{%
\vbox{\let\protect\relax
\pictur@(0,0)(\strip@pt\@tempdima,\strip@pt\@tempdimb)%
\ESO@HookIII\ESO@HookI\ESO@HookII%
\global\let\ESO@HookII\@empty%
\endpicture}%
\nointerlineskip%
\box\@cclv}%
\fi
\egroup
}
\EveryShipout{\@ShipoutPicture}
\RequirePackage{keyval}
\newif\ifESO@dvips\ESO@dvipsfalse
\newif\ifESO@texcoord\ESO@texcoordfalse
\AtBeginDocument{%
\IfFileExists{color.sty}
{%
\RequirePackage{color}
\let\ESO@color=\color\let\ESO@colorbox=\colorbox
\let\ESO@fcolorbox=\fcolorbox
}{}
\@ifundefined{Gin@driver}{}%
{%
\ifx\Gin@driver\@empty\else%
\filename@parse{\Gin@driver}\def\reserved@a{dvips}%
\ifx\filename@base\reserved@a\ESO@dvipstrue\fi%
\fi
}%
\ifx\pdfoutput\undefined\else
\ifx\pdfoutput\relax\else
\ifcase\pdfoutput\else
\ESO@dvipsfalse%
\fi
\fi
\fi
}
\ifESO@texcoord
\def\ESO@yoffsetI{0pt}\def\ESO@yoffsetII{-\paperheight}
\else
\def\ESO@yoffsetI{\paperheight}\def\ESO@yoffsetII{0pt}
\fi
% ---------------------------------------------------------------
\typeout{CVPR 8.5 x 11-Inch Proceedings Style `cvpr.sty'.}
% ten point helvetica bold required for captions
% eleven point times bold required for second-order headings
% in some sites the name of the fonts may differ,
% change the name here:
\font\cvprtenhv = phvb at 8pt % *** IF THIS FAILS, SEE cvpr.sty ***
\font\elvbf = ptmb scaled 1100
\font\tenbf = ptmb scaled 1000
% If the above lines give an error message, try to comment them and
% uncomment these:
%\font\cvprtenhv = phvb7t at 8pt
%\font\elvbf = ptmb7t scaled 1100
%\font\tenbf = ptmb7t scaled 1000
% set dimensions of columns, gap between columns, and paragraph indent
\setlength{\textheight}{8.875in}
\setlength{\textwidth}{6.875in}
\setlength{\columnsep}{0.3125in}
\setlength{\topmargin}{0in}
\setlength{\headheight}{0in}
\setlength{\headsep}{0in}
\setlength{\parindent}{1pc}
\setlength{\oddsidemargin}{-0.1875in}
\setlength{\evensidemargin}{-0.1875in}
% Suppress page numbers when the appropriate option is given
\iftoggle{cvprpagenumbers}{}{%
\pagestyle{empty}
}
\AtBeginDocument{%
% Print an error if document class other than article is used
\@ifclassloaded{article}{}{%
\PackageError{cvpr}{Package only meant to be used with document class `article'}{Change document class to `article'.}
}
% Print a warning if incorrect options for article are specified
\@ifclasswith{article}{10pt}{}{%
\PackageWarningNoLine{cvpr}{Incorrect font size specified - CVPR requires 10-point fonts. Please load document class `article' with `10pt' option}
}
\@ifclasswith{article}{twocolumn}{}{%
\PackageWarningNoLine{cvpr}{Single column document - CVPR requires papers to have two-column layout. Please load document class `article' with `twocolumn' option}
}
\@ifclasswith{article}{letterpaper}{}{%
\PackageWarningNoLine{cvpr}{Incorrect paper size - CVPR uses paper size `letter'. Please load document class `article' with `letterpaper' option}
}
% Print a warning if hyperref is not loaded and/or if the pagebackref option is missing
\iftoggle{cvprfinal}{%
\@ifpackageloaded{hyperref}{}{%
\PackageWarningNoLine{cvpr}{Package `hyperref' is not loaded, but highly recommended for camera-ready version}
}
}{%
\@ifpackageloaded{hyperref}{
\@ifpackagewith{hyperref}{pagebackref}{}{
\PackageWarningNoLine{cvpr}{Package `hyperref' is not loaded with option `pagebackref', which is strongly recommended for review version}
}
}{%
\PackageWarningNoLine{cvpr}{Package `hyperref' is not loaded, but strongly recommended for review version}
}
}
}
\def\@maketitle{
\newpage
\null
\iftoggle{cvprrebuttal}{\vspace*{-.3in}}{\vskip .375in}
\begin{center}
% smaller title font only for rebuttal
\iftoggle{cvprrebuttal}{{\large \bf \@title \par}}{{\Large \bf \@title \par}}
% additional two empty lines at the end of the title
\iftoggle{cvprrebuttal}{\vspace*{-22pt}}{\vspace*{24pt}}{
\large
\lineskip .5em
\begin{tabular}[t]{c}
\iftoggle{cvprfinal}{
\@author
}{
\iftoggle{cvprrebuttal}{}{
Anonymous \confName~submission\\
\vspace*{1pt}\\
Paper ID \paperID
}
}
\end{tabular}
\par
}
% additional small space at the end of the author name
\vskip .5em
% additional empty line at the end of the title block
\vspace*{12pt}
\end{center}
}
\def\abstract{%
% Suppress page numbers when the appropriate option is given
\iftoggle{cvprpagenumbers}{}{%
\thispagestyle{empty}
}
\centerline{\large\bf Abstract}%
\vspace*{12pt}\noindent%
\it\ignorespaces%
}
\def\endabstract{%
% additional empty line at the end of the abstract
\vspace*{12pt}
}
\def\affiliation#1{\gdef\@affiliation{#1}} \gdef\@affiliation{}
% correct heading spacing and type
\def\cvprsection{\@startsection {section}{1}{\z@}
{-10pt plus -2pt minus -2pt}{7pt} {\large\bf}}
\def\cvprssect#1{\cvprsection*{#1}}
\def\cvprsect#1{\cvprsection{\texorpdfstring{\hskip -1em.~}{}#1}}
\def\section{\@ifstar\cvprssect\cvprsect}
\def\cvprsubsection{\@startsection {subsection}{2}{\z@}
{-8pt plus -2pt minus -2pt}{5pt} {\elvbf}}
\def\cvprssubsect#1{\cvprsubsection*{#1}}
\def\cvprsubsect#1{\cvprsubsection{\texorpdfstring{\hskip -1em.~}{}#1}}
\def\subsection{\@ifstar\cvprssubsect\cvprsubsect}
\def\cvprsubsubsection{\@startsection {subsubsection}{3}{\z@}
{-6pt plus -2pt minus -2pt}{3pt} {\tenbf}}
\def\cvprssubsubsect#1{\cvprsubsubsection*{#1}}
\def\cvprsubsubsect#1{\cvprsubsubsection{\texorpdfstring{\hskip -1em.~}{}#1}}
\def\subsubsection{\@ifstar\cvprssubsubsect\cvprsubsubsect}
%% --------- Page background marks: Ruler and confidentiality (only for review and rebuttal)
\iftoggle{cvprfinal}{
% In review and rebuttal mode, we use the "lineno" package for numbering lines.
% When switching to a different mode, the "\@LN" macro may remain in cached .aux files,
% leading to build errors (https://github.com/cvpr-org/author-kit/issues/49).
% Defining the macro as empty fixes that (https://tex.stackexchange.com/a/125779).
\makeatletter
\providecommand{\@LN}[2]{}
\makeatother
}{
% ----- define vruler
\makeatletter
\newbox\cvprrulerbox
\newcount\cvprrulercount
\newdimen\cvprruleroffset
\newdimen\cv@lineheight
\newdimen\cv@boxheight
\newbox\cv@tmpbox
\newcount\cv@refno
\newcount\cv@tot
% NUMBER with left flushed zeros \fillzeros[<WIDTH>]<NUMBER>
\newcount\cv@tmpc@ \newcount\cv@tmpc
\def\fillzeros[#1]#2{\cv@tmpc@=#2\relax\ifnum\cv@tmpc@<0\cv@tmpc@=-\cv@tmpc@\fi
\cv@tmpc=1 %
\loop\ifnum\cv@tmpc@<10 \else \divide\cv@tmpc@ by 10 \advance\cv@tmpc by 1 \fi
\ifnum\cv@tmpc@=10\relax\cv@tmpc@=11\relax\fi \ifnum\cv@tmpc@>10 \repeat
\ifnum#2<0\advance\cv@tmpc1\relax-\fi
\loop\ifnum\cv@tmpc<#1\relax0\advance\cv@tmpc1\relax\fi \ifnum\cv@tmpc<#1 \repeat
\cv@tmpc@=#2\relax\ifnum\cv@tmpc@<0\cv@tmpc@=-\cv@tmpc@\fi \relax\the\cv@tmpc@}%
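% Example: \fillzeros[3]{7} expands to 007; review mode below uses this
% to typeset three-digit line numbers.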
\makeatother
% ----- end of vruler
%% Define linenumber setup
\RequirePackage[switch,mathlines]{lineno}
% Line numbers in CVPR blue using font from \cvprtenhv
\renewcommand\linenumberfont{\cvprtenhv\color[rgb]{.5,.5,1}}
\renewcommand\thelinenumber{\fillzeros[3]{\arabic{linenumber}}}
\setlength{\linenumbersep}{.75cm}
% Bug: An equation with $$ ... $$ isn't numbered, nor is the previous line.
% Patch amsmath commands so that the previous line and the equation itself
% are numbered. Bug: multline has an extra line number.
% https://tex.stackexchange.com/questions/461186/how-to-use-lineno-with-amsmath-align
\RequirePackage{etoolbox} %% <- for \pretocmd, \apptocmd and \patchcmd
\newcommand*\linenomathpatch[1]{%
\expandafter\pretocmd\csname #1\endcsname {\linenomath}{}{}%
\expandafter\pretocmd\csname #1*\endcsname {\linenomath}{}{}%
\expandafter\apptocmd\csname end#1\endcsname {\endlinenomath}{}{}%
\expandafter\apptocmd\csname end#1*\endcsname {\endlinenomath}{}{}%
}
\newcommand*\linenomathpatchAMS[1]{%
\expandafter\pretocmd\csname #1\endcsname {\linenomathAMS}{}{}%
\expandafter\pretocmd\csname #1*\endcsname {\linenomathAMS}{}{}%
\expandafter\apptocmd\csname end#1\endcsname {\endlinenomath}{}{}%
\expandafter\apptocmd\csname end#1*\endcsname {\endlinenomath}{}{}%
}
%% Definition of \linenomathAMS depends on whether the mathlines option is provided
\expandafter\ifx\linenomath\linenomathWithnumbers
\let\linenomathAMS\linenomathWithnumbers
%% The following line gets rid of extra line numbers at the bottom:
\patchcmd\linenomathAMS{\advance\postdisplaypenalty\linenopenalty}{}{}{}
\else
\let\linenomathAMS\linenomathNonumbers
\fi
% Add the numbers
\linenumbers
\AtBeginDocument{%
\linenomathpatch{equation}%
\linenomathpatchAMS{gather}%
\linenomathpatchAMS{multline}%
\linenomathpatchAMS{align}%
\linenomathpatchAMS{alignat}%
\linenomathpatchAMS{flalign}%
}
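% Sketch: with the patches above, review mode also numbers display math,
% e.g. \begin{align} y &= Wx + b \end{align} gets its own line number.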
% \makevruler[<SCALE>][<INITIAL_COUNT>][<STEP>][<DIGITS>][<HEIGHT>]
\def\cvprruler#1{\makevruler[12pt][#1][1][3][0.993\textheight]\usebox{\cvprrulerbox}}
\AddToShipoutPicture{%
\color[rgb]{.5,.5,1}
\def\pid{\parbox{1in}{\begin{center}\bf\sf{\small \confName}\\\small \#\paperID\end{center}}}
\AtTextUpperLeft{%paperID in corners
\put(\LenToUnit{-65pt},\LenToUnit{45pt}){\pid}
\put(\LenToUnit{\textwidth-12pt},\LenToUnit{45pt}){\pid}
}
\AtTextUpperLeft{%confidential
\put(0,\LenToUnit{1cm}){\parbox{\textwidth}{\centering\cvprtenhv
\confName~\confYear~Submission \#\paperID. CONFIDENTIAL REVIEW COPY. DO NOT DISTRIBUTE.}}
}
}
} % end of not cvprfinal
%%% Make figure placement a little more predictable.
% We trust the user to move figures if this results
% in ugliness.
% Minimize bad page breaks at figures
\renewcommand{\textfraction}{0.01}
\renewcommand{\floatpagefraction}{0.99}
\renewcommand{\topfraction}{0.99}
\renewcommand{\bottomfraction}{0.99}
\renewcommand{\dblfloatpagefraction}{0.99}
\renewcommand{\dbltopfraction}{0.99}
\setcounter{totalnumber}{99}
\setcounter{topnumber}{99}
\setcounter{bottomnumber}{99}
% Add a period to the end of an abbreviation unless there's one
% already, then \xspace.
\makeatletter
\DeclareRobustCommand\onedot{\futurelet\@let@token\@onedot}
\def\@onedot{\ifx\@let@token.\else.\null\fi\xspace}
\def\eg{\emph{e.g}\onedot} \def\Eg{\emph{E.g}\onedot}
\def\ie{\emph{i.e}\onedot} \def\Ie{\emph{I.e}\onedot}
\def\cf{\emph{cf}\onedot} \def\Cf{\emph{Cf}\onedot}
\def\etc{\emph{etc}\onedot} \def\vs{\emph{vs}\onedot}
\def\wrt{w.r.t\onedot} \def\dof{d.o.f\onedot}
\def\iid{i.i.d\onedot} \def\wolog{w.l.o.g\onedot}
\def\etal{\emph{et al}\onedot}
\makeatother
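% Example: "Augmentations, \eg cropping, help." prints as
% "Augmentations, e.g. cropping, help."; a following period is absorbed,
% so "\etc." yields "etc." rather than "etc..".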
% ---------------------------------------------------------------
%% Redefine the \title command so that the title is saved in \thetitle, and provide the \maketitlesupplementary command
\let\titleold\title
\renewcommand{\title}[1]{\titleold{#1}\newcommand{\thetitle}{#1}}
\def\maketitlesupplementary
{
\newpage
\twocolumn[
\centering
\Large
\textbf{\thetitle}\\
\vspace{0.5em}Supplementary Material \\
\vspace{1.0em}
] %< twocolumn
}
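% Usage sketch (hypothetical): after \title{...}, start the supplementary
% pages with
%   \maketitlesupplementary
% to print the saved \thetitle over a "Supplementary Material" subtitle.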
% ---------------------------------------------------------------
%% Support for easy cross-referencing (e.g., \cref{sec:intro})
% configured with \AtEndPreamble as it needs to be called after hyperref
\AtEndPreamble{
\usepackage[capitalize]{cleveref}
\crefname{section}{Sec.}{Secs.}
\Crefname{section}{Section}{Sections}
\Crefname{table}{Table}{Tables}
\crefname{table}{Tab.}{Tabs.}
}
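% Example (label names are placeholders): \cref{sec:intro} renders as
% "Sec. 1" and \Cref{tab:results} as "Table 1".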
% ---------------------------------------------------------------
%% More compact itemize/enumeration (e.g. to list contributions)
\RequirePackage[shortlabels,inline]{enumitem}
\setlist[itemize]{noitemsep,leftmargin=*,topsep=0em}
\setlist[enumerate]{noitemsep,leftmargin=*,topsep=0em}
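% Example (sketch): the `inline' option enables in-text lists, e.g.
%   \begin{enumerate*}[(i)] \item fast \item simple \end{enumerate*}
% typesets as "(i) fast (ii) simple" within the paragraph.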

255
eccv.sty

@@ -1,255 +0,0 @@
% ---------------------------------------------------------------
%
% Formatting Package for ECCV Submissions
%
% initially created for ECCV 2024
% by Stefan Roth
%
% based on previous ECCV templates:
% updated April 2002 by Antje Endemann
% Based on CVPR 07 and LNCS, with modifications by DAF, AZ and elle, 2008 and AA, 2010, and CC, 2011; TT, 2014; AAS, 2016; AAS, 2020; TH, 2022
%
% and the CVPR templates:
% https://github.com/cvpr-org/author-kit
%
% No guarantee is given that the format corresponds perfectly to
% LNCS Proceedings, but most features should be ok.
%
% ---------------------------------------------------------------
%
% use as
% \documentclass[runningheads]{llncs}
% \usepackage[options]{eccv}
%
% "options" include
% * "review" for submitting a paper for review and
% * "final" for the camera ready (default).
% * "mobile" for camera ready on small-screen devices
% * "year=20??" allows to specify the conference year (default current year).
% * "ID=12345" allows to specify the paper ID (default `none').
%
% specify references as
% \bibliographystyle{splncs04}
% \bibliography{...your files...}
% ---------------------------------------------------------------
\NeedsTeXFormat{LaTeX2e}[1999/12/01]
\ProvidesPackage{eccv}[LaTeX style for ECCV]
% ---------------------------------------------------------------
% Suppress unwanted warnings
\RequirePackage{silence}
\WarningFilter{amsmath}{Unable to redefine math accent \vec}
\WarningFilter{caption}{Unknown document class (or package)}
\RequirePackage{etoolbox}
% ---------------------------------------------------------------
% Basic packages
\RequirePackage[T1]{fontenc} % Required to avoid font issues
\RequirePackage[left,mathlines]{lineno} % Support for line numbers
\RequirePackage[dvipsnames]{xcolor} % Color for line numbers
\RequirePackage{amsmath} % Need AMS packages to bug fix
\RequirePackage{amssymb} % line numbers in equations
\RequirePackage{cite} % Sort citations
\RequirePackage{xspace}
% Breaking lines for URLs in the bib
\RequirePackage[hyphens]{url}
\Urlmuskip=0mu plus 1mu\relax
% Color for links and line numbers
\definecolor{eccvblue}{rgb}{0.12,0.49,0.85}
% ---------------------------------------------------------------
% Use modern caption package to allow for sub-figures etc.
% Reproduces the original LNCS style as closely as possible.
\RequirePackage[labelfont=bf,font=small,tableposition=bottom]{caption}
\RequirePackage[skip=3pt]{subcaption}
% ---------------------------------------------------------------
% Process ECCV package options
% Key value options
\RequirePackage{kvoptions}
\SetupKeyvalOptions{
family=eccv,
prefix=eccv@
}
\DeclareBoolOption{review}
\DeclareComplementaryOption{final}{review}
\DeclareBoolOption{mobile}
\DeclareStringOption[\the\year]{year}
\DeclareStringOption[none]{ID}
\DeclareDefaultOption{\PackageWarning{eccv}{Unknown option `\CurrentOption'}}
\ProcessKeyvalOptions*
% Enable processing options also in main paper with \eccvsetup{ key=value, ... }
\newcommand*{\eccvsetup}
{\setkeys{eccv}%
}
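% Usage sketch: keys can also be set from the document, e.g.
%   \eccvsetup{ID=1741, year=2026}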
% Warn if ECCV package for review version is not loaded with paper ID option
\ifeccv@review
\ifdefstring{\eccv@ID}{none}{%
\PackageWarningNoLine{eccv}{Review version requires a paper ID. Please load `eccv' package with `ID=*****' option and replace `*****' with your paper ID}
}{}
\fi
% ---------------------------------------------------------------
% Basic error handling
\AtBeginDocument{%
% Print an error if document class other than llncs is used
\@ifclassloaded{llncs}{}{%
\PackageError{eccv}{Package only meant to be used with document class `llncs'}{Change document class to `llncs'.}
}
% Print a warning if incorrect options for llncs are specified
\@ifclasswith{llncs}{runningheads}{}{%
\PackageWarningNoLine{eccv}{Running heads incorrectly suppressed - ECCV requires running heads. Please load document class `llncs' with `runningheads' option}
}
% Print a warning if hyperref is not loaded and/or if the pagebackref option is missing
\ifeccv@review
\@ifpackageloaded{hyperref}{%
\@ifpackagewith{hyperref}{pagebackref}{}{%
\PackageWarningNoLine{eccv}{Package `hyperref' is not loaded with option `pagebackref', which is strongly recommended for review version}
}
}{%
\PackageWarningNoLine{eccv}{Package `hyperref' is not loaded, but strongly recommended for review version}
}
\else
\@ifpackageloaded{hyperref}{%
\@ifpackagewith{hyperref}{pagebackref}{%
\PackageWarningNoLine{eccv}{Package `hyperref' is loaded with option `pagebackref', which is *not* recommended for camera-ready version}{}
}{}
}{%
\PackageWarningNoLine{eccv}{Package `hyperref' is not loaded, but highly recommended for camera-ready version}
}
\fi
}
% ---------------------------------------------------------------
% Line number support for the review version
% NUMBER with left flushed zeros \fillzeros[<WIDTH>]<NUMBER>
% from CVPR template
\newcount\cv@tmpc@ \newcount\cv@tmpc
\def\fillzeros[#1]#2{\cv@tmpc@=#2\relax\ifnum\cv@tmpc@<0\cv@tmpc@=-\cv@tmpc@\fi
\cv@tmpc=1 %
\loop\ifnum\cv@tmpc@<10 \else \divide\cv@tmpc@ by 10 \advance\cv@tmpc by 1 \fi
\ifnum\cv@tmpc@=10\relax\cv@tmpc@=11\relax\fi \ifnum\cv@tmpc@>10 \repeat
\ifnum#2<0\advance\cv@tmpc1\relax-\fi
\loop\ifnum\cv@tmpc<#1\relax0\advance\cv@tmpc1\relax\fi \ifnum\cv@tmpc<#1 \repeat
\cv@tmpc@=#2\relax\ifnum\cv@tmpc@<0\cv@tmpc@=-\cv@tmpc@\fi \relax\the\cv@tmpc@}%
% colored, bold, sans serif line numbers
\renewcommand\thelinenumber{\color{eccvblue}\normalfont\sffamily\scriptsize\fillzeros[3]{\arabic{linenumber}}\color[rgb]{0,0,0}}
% on both sides
\renewcommand\makeLineNumber{\hss\thelinenumber\ \hspace{4.5mm} \rlap{\hskip\textwidth\ \hspace{5mm}\thelinenumber}}
% Bug: An equation with $$ ... $$ isn't numbered, nor is the previous line.
% Patch amsmath commands so that the previous line and the equation itself
% are numbered. Bug: multline has an extra line number.
% https://tex.stackexchange.com/questions/461186/how-to-use-lineno-with-amsmath-align
%% Patch 'normal' math environments:
\newcommand*\linenomathpatch[1]{%
\cspreto{#1}{\linenomath}%
\cspreto{#1*}{\linenomath}%
\csappto{end#1}{\endlinenomath}%
\csappto{end#1*}{\endlinenomath}%
}
%% Patch AMS math environments:
\newcommand*\linenomathpatchAMS[1]{%
\cspreto{#1}{\linenomathAMS}%
\cspreto{#1*}{\linenomathAMS}%
\csappto{end#1}{\endlinenomath}%
\csappto{end#1*}{\endlinenomath}%
}
%% Definition of \linenomathAMS depends on whether the mathlines option is provided
\expandafter\ifx\linenomath\linenomathWithnumbers
\let\linenomathAMS\linenomathWithnumbers
%% The following line gets rid of extra line numbers at the bottom:
\patchcmd\linenomathAMS{\advance\postdisplaypenalty\linenopenalty}{}{}{}
\else
\let\linenomathAMS\linenomathNonumbers
\fi
\linenomathpatch{equation}
\linenomathpatchAMS{gather}
\linenomathpatchAMS{multline}
\linenomathpatchAMS{align}
\linenomathpatchAMS{alignat}
\linenomathpatchAMS{flalign}
% Disable line numbering during measurement step of multline
\makeatletter
\patchcmd{\mmeasure@}{\measuring@true}{
\measuring@true
\ifnum-\linenopenaltypar>\interdisplaylinepenalty
\advance\interdisplaylinepenalty-\linenopenalty
\fi
}{}{}
\makeatother
% ---------------------------------------------------------------
% Modifications to LNCS template for review version
\makeatletter
\ifeccv@review
% Display line numbers
\AtBeginDocument{%
\linenumbers
\linenomathpatch{equation}%
\linenomathpatchAMS{gather}%
\linenomathpatchAMS{multline}%
\linenomathpatchAMS{align}%
\linenomathpatchAMS{alignat}%
\linenomathpatchAMS{flalign}%
}
% Crop the page for review version
\RequirePackage[width=122mm,left=12mm,paperwidth=146mm,height=193mm,top=12mm,paperheight=217mm]{geometry}
% Replace authors, institute, and running title with review placeholders
\let\maketitleold\maketitle
\renewcommand{\maketitle}{\author{Anonymous ECCV \eccv@year{} Submission}%
\titlerunning{ECCV \eccv@year{} Submission \#\eccv@ID}%
\authorrunning{ECCV \eccv@year{} Submission \#\eccv@ID}%
\institute{Paper ID \#\eccv@ID}%
\maketitleold}
\fi
\ifeccv@mobile
% Crop the page for mobile version
\RequirePackage[width=122mm,left=12mm,paperwidth=146mm,height=193mm,top=12mm,paperheight=217mm]{geometry}
\fi
% Macro for ECCV year in main text
\newcommand{\ECCVyear}{\eccv@year\xspace}
\makeatother
% ---------------------------------------------------------------
% Support for easy cross-referencing (e.g., \cref{eq:loss}, \cref{sec:intro})
% configured with \AtEndPreamble as it needs to be called after hyperref
\AtEndPreamble{
\usepackage[capitalize]{cleveref}
\crefname{section}{Sec.}{Secs.}
\Crefname{section}{Section}{Sections}
\crefname{table}{Tab.}{Tabs.}
\Crefname{table}{Table}{Tables}
}

43
eccvabbrv.sty

@@ -1,43 +0,0 @@
% ---------------------------------------------------------------
%
% Formatting Package for ECCV Submissions
%
% initially created for ECCV 2024
% by Stefan Roth
%
% based on previous ECCV templates:
% updated April 2002 by Antje Endemann
% Based on CVPR 07 and LNCS, with modifications by DAF, AZ and elle, 2008 and AA, 2010, and CC, 2011; TT, 2014; AAS, 2016; AAS, 2020; TH, 2022
%
% and the CVPR templates:
% https://github.com/cvpr-org/author-kit
%
% No guarantee is given that the format corresponds perfectly to
% LNCS Proceedings, but most features should be ok.
%
% ---------------------------------------------------------------
\NeedsTeXFormat{LaTeX2e}[1999/12/01]
\ProvidesPackage{eccvabbrv}[Common abbreviations for ECCV]
% Add a period to the end of an abbreviation unless there's one
% already, then \xspace.
\RequirePackage{xspace}
\makeatletter
\DeclareRobustCommand\onedot{\futurelet\@let@token\@onedot}
\def\@onedot{\ifx\@let@token.\else.\null\fi\xspace}
\def\eg{\emph{e.g}\onedot}
\def\Eg{\emph{E.g}\onedot}
\def\ie{\emph{i.e}\onedot}
\def\Ie{\emph{I.e}\onedot}
\def\cf{\emph{cf}\onedot}
\def\Cf{\emph{Cf}\onedot}
\def\etc{\emph{etc}\onedot}
\def\vs{\emph{vs}\onedot}
\def\wrt{w.r.t\onedot}
\def\dof{d.o.f\onedot}
\def\iid{i.i.d\onedot}
\def\wolog{w.l.o.g\onedot}
\def\etal{\emph{et al}\onedot}
\makeatother

493
parz_sym.eps

@@ -1,493 +0,0 @@
[Deleted file content: a MATLAB-generated EPS figure (%%Title: parz_sym.eps, created 03/13/96, 493 lines of PostScript). It plotted three bell-shaped curves (solid, dashed, dotted) over an x-axis from -25 to 10 and a y-axis from 0 to 0.025, with markers labeled Xi, Xs, and Xj.]

1448
ieeenat_fullname.bst Normal file

File diff suppressed because it is too large

[29 deleted binary files not shown: 24 images (4.2–15 KiB) plus 5 other binary files.]

1218
llncs.cls

File diff suppressed because it is too large

111
main.bib Normal file
View File

@@ -0,0 +1,111 @@
@String(PAMI = {IEEE Trans. Pattern Anal. Mach. Intell.})
@String(IJCV = {Int. J. Comput. Vis.})
@String(CVPR= {IEEE Conf. Comput. Vis. Pattern Recog.})
@String(ICCV= {Int. Conf. Comput. Vis.})
@String(ECCV= {Eur. Conf. Comput. Vis.})
@String(NIPS= {Adv. Neural Inform. Process. Syst.})
@String(ICPR = {Int. Conf. Pattern Recog.})
@String(BMVC= {Brit. Mach. Vis. Conf.})
@String(TOG= {ACM Trans. Graph.})
@String(TIP = {IEEE Trans. Image Process.})
@String(TVCG = {IEEE Trans. Vis. Comput. Graph.})
@String(TMM = {IEEE Trans. Multimedia})
@String(ACMMM= {ACM Int. Conf. Multimedia})
@String(ICME = {Int. Conf. Multimedia and Expo})
@String(ICASSP= {ICASSP})
@String(ICIP = {IEEE Int. Conf. Image Process.})
@String(ACCV = {ACCV})
@String(ICLR = {Int. Conf. Learn. Represent.})
@String(IJCAI = {IJCAI})
@String(PR = {Pattern Recognition})
@String(AAAI = {AAAI})
@String(CVPRW= {IEEE Conf. Comput. Vis. Pattern Recog. Worksh.})
@String(CSVT = {IEEE Trans. Circuit Syst. Video Technol.})
@String(SPL = {IEEE Sign. Process. Letters})
@String(VR = {Vis. Res.})
@String(JOV = {J. Vis.})
@String(TVC = {The Vis. Comput.})
@String(JCST = {J. Comput. Sci. Tech.})
@String(CGF = {Comput. Graph. Forum})
@String(CVM = {Computational Visual Media})
@String(PAMI = {IEEE TPAMI})
@String(IJCV = {IJCV})
@String(CVPR = {CVPR})
@String(ICCV = {ICCV})
@String(ECCV = {ECCV})
@String(NIPS = {NeurIPS})
@String(ICPR = {ICPR})
@String(BMVC = {BMVC})
@String(TOG = {ACM TOG})
@String(TIP = {IEEE TIP})
@String(TVCG = {IEEE TVCG})
@String(TCSVT = {IEEE TCSVT})
@String(TMM = {IEEE TMM})
@String(ACMMM = {ACM MM})
@String(ICME = {ICME})
@String(ICASSP= {ICASSP})
@String(ICIP = {ICIP})
@String(ACCV = {ACCV})
@String(ICLR = {ICLR})
@String(IJCAI = {IJCAI})
@String(PR = {PR})
@String(AAAI = {AAAI})
@String(CVPRW= {CVPRW})
@String(CSVT = {IEEE TCSVT})
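% Note: when a @String is defined twice, BibTeX uses the most recent
% definition (typically with a redefinition warning), so the abbreviated
% venue names above take effect; comment out one block to switch between
% long and short forms.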
@misc{Authors14,
author = {FirstName LastName},
title = {The frobnicatable foo filter},
note = {Face and Gesture submission ID 324. Supplied as supplemental material {\tt fg324.pdf}},
year = 2014
}
@misc{Authors14b,
author = {FirstName LastName},
title = {Frobnication tutorial},
note = {Supplied as supplemental material {\tt tr.pdf}},
year = 2014
}
@article{Alpher02,
author = {FirstName Alpher},
title = {Frobnication},
journal = PAMI,
volume = 12,
number = 1,
pages = {234--778},
year = 2002
}
@article{Alpher03,
author = {FirstName Alpher and FirstName Fotheringham-Smythe},
title = {Frobnication revisited},
journal = {Journal of Foo},
volume = 13,
number = 1,
pages = {234--778},
year = 2003
}
@article{Alpher04,
author = {FirstName Alpher and FirstName Fotheringham-Smythe and FirstName Gamow},
title = {Can a machine frobnicate?},
journal = {Journal of Foo},
volume = 14,
number = 1,
pages = {234--778},
year = 2004
}
@inproceedings{Alpher05,
author = {FirstName Alpher and FirstName Gamow},
title = {Can a computer frobnicate?},
booktitle = CVPR,
pages = {234--778},
year = 2005
}

94
main.brf Normal file

@@ -0,0 +1,94 @@
\backcite {Sanderson2022}{{1}{1}{figure.caption.1}}
\backcite {Vezakis2024}{{1}{1}{figure.caption.1}}
\backcite {Wang2022b}{{1}{1}{figure.caption.1}}
\backcite {Carion2020}{{1}{1}{figure.caption.1}}
\backcite {Girshick2013}{{1}{1}{figure.caption.1}}
\backcite {He2017}{{1}{1}{figure.caption.1}}
\backcite {Dosovitskiy2021}{{1}{1}{figure.caption.1}}
\backcite {Liu2021}{{1}{1}{figure.caption.1}}
\backcite {Touvron2021b}{{1}{1}{figure.caption.1}}
\backcite {Khan2022}{{1}{1}{figure.caption.1}}
\backcite {Rangel2024}{{1}{1}{figure.caption.1}}
\backcite {Deng2009}{{1}{1}{figure.caption.1}}
\backcite {He2016}{{1}{1}{figure.caption.1}}
\backcite {Krizhevsky2012}{{1}{1}{figure.caption.1}}
\backcite {He2016}{{1}{1}{figure.caption.1}}
\backcite {Krizhevsky2012}{{1}{1}{figure.caption.1}}
\backcite {Touvron2022}{{1}{1}{figure.caption.1}}
\backcite {Wortsman2022}{{1}{1}{figure.caption.1}}
\backcite {Vaswani2017}{{1}{1}{figure.caption.1}}
\backcite {Dosovitskiy2021}{{1}{1}{figure.caption.1}}
\backcite {Carion2020}{{1}{1}{figure.caption.1}}
\backcite {Wang2022a}{{1}{1}{figure.caption.1}}
\backcite {Wortsman2022}{{1}{1}{figure.caption.1}}
\backcite {Yu2022}{{1}{1}{figure.caption.1}}
\backcite {Zong2022}{{1}{1}{figure.caption.1}}
\backcite {Shorten2019}{{1}{1}{figure.caption.1}}
\backcite {Xu2023d}{{1}{1}{figure.caption.1}}
\backcite {Ding2023a}{{1}{1}{figure.caption.1}}
\backcite {RojasGomez2023}{{1}{1}{figure.caption.1}}
\backcite {Ren2024}{{2}{1}{figure.caption.1}}
\backcite {Sun2024}{{2}{1}{figure.caption.1}}
\backcite {Suvorov2021}{{2}{1}{figure.caption.1}}
\backcite {Zhong2017}{{2}{2}{section*.3}}
\backcite {Liu2022d}{{2}{2}{section*.3}}
\backcite {Zhang2018a}{{2}{2}{section*.3}}
\backcite {Yun2019}{{2}{2}{section*.3}}
\backcite {Takahashi2018}{{2}{2}{section*.3}}
\backcite {Cubuk2018}{{2}{2}{section*.3}}
\backcite {Cubuk2019}{{2}{2}{section*.3}}
\backcite {Touvron2022}{{2}{2}{section*.3}}
\backcite {Shorten2019}{{2}{2}{section*.3}}
\backcite {Xu2023d}{{2}{2}{section*.3}}
\backcite {Ghiasi2020}{{2}{2}{section*.4}}
\backcite {Ghiasi2020}{{2}{2}{section*.4}}
\backcite {Shermaine2025}{{2}{2}{section*.4}}
\backcite {Ling2022}{{2}{2}{section*.4}}
\backcite {Werman2021}{{2}{2}{section*.4}}
\backcite {Hinterstoisser2019}{{2}{2}{section*.4}}
\backcite {Dwibedi2017}{{2}{2}{section*.4}}
\backcite {Ge2023}{{2}{2}{section*.4}}
\backcite {Kang2022}{{2}{2}{section*.4}}
\backcite {Hendrycks2019}{{2}{2}{section*.6}}
\backcite {Hendrycks2019}{{2}{2}{section*.6}}
\backcite {Li2023e}{{2}{2}{section*.6}}
\backcite {Zhang2024f}{{2}{2}{section*.6}}
\backcite {Geirhos2018}{{3}{2}{section*.6}}
\backcite {Xiao2020}{{3}{2}{section*.6}}
\backcite {Sun2024}{{3}{3.1}{subsection.3.1}}
\backcite {Ren2024}{{3}{3.1}{subsection.3.1}}
\backcite {Liu2023e}{{3}{3.1}{subsection.3.1}}
\backcite {Kirillov2023}{{3}{3.1}{subsection.3.1}}
\backcite {Suvorov2021}{{3}{3.1}{subsection.3.1}}
\backcite {Sun2024}{{3}{3.1}{subsection.3.1}}
\backcite {Touvron2022}{{4}{3.2}{subsection.3.2}}
\backcite {Suvorov2021}{{4}{1}{table.caption.7}}
\backcite {Suvorov2021}{{4}{1}{table.caption.7}}
\backcite {Sun2024}{{4}{1}{table.caption.7}}
\backcite {Le2015}{{4}{4.1}{subsection.4.1}}
\backcite {Sun2024}{{4}{4.1}{table.caption.8}}
\backcite {Suvorov2021}{{4}{4.1}{table.caption.8}}
\backcite {Suvorov2021}{{5}{2}{table.caption.8}}
\backcite {Bates1955}{{5}{4.1}{table.caption.9}}
\backcite {Nauen2025}{{5}{4.2}{table.caption.11}}
\backcite {Touvron2022}{{5}{4.2}{table.caption.11}}
\backcite {Dosovitskiy2021}{{5}{4.2}{table.caption.11}}
\backcite {Liu2021}{{5}{4.2}{table.caption.11}}
\backcite {He2016}{{5}{4.2}{table.caption.11}}
\backcite {Touvron2021b}{{5}{4.2}{table.caption.11}}
\backcite {Ge2023}{{6}{4.2}{table.caption.12}}
\backcite {Ghiasi2020}{{6}{4.2}{table.caption.12}}
\backcite {Shermaine2025}{{6}{4.2}{table.caption.12}}
\backcite {Maji2013}{{6}{4.2}{table.caption.13}}
\backcite {Dehghan2017}{{6}{4.2}{table.caption.13}}
\backcite {Nilsback2008}{{6}{4.2}{table.caption.13}}
\backcite {Kaur2017}{{6}{4.2}{table.caption.13}}
\backcite {Parkhi2012}{{6}{4.2}{table.caption.13}}
\backcite {Xiao2020}{{7}{4.3}{figure.caption.14}}
\backcite {Wang2024f}{{7}{4.3}{figure.caption.14}}
\backcite {Chattopadhay2018}{{7}{4.3}{figure.caption.15}}
\backcite {Selvaraju2016}{{7}{4.3}{figure.caption.15}}
\backcite {Sundararajan2017}{{7}{4.3}{figure.caption.15}}
\backcite {Selvaraju2016}{{7}{4.3}{figure.caption.15}}
\backcite {Chattopadhay2018}{{7}{4.3}{figure.caption.15}}
\backcite {Sundararajan2017}{{7}{4.3}{figure.caption.15}}

BIN
main.pdf Normal file

Binary file not shown.

140
main.tex

@@ -1,102 +1,70 @@
-\documentclass[runningheads]{llncs}
-% ---------------------------------------------------------------
-% Include basic ECCV package
-% TODO REVIEW: Insert your submission number below by replacing '*****'
-% TODO FINAL: Comment out the following line for the camera-ready version
-\usepackage[review,year=2026,ID=1741]{eccv}
-% % TODO FINAL: Un-comment the following line for the camera-ready version
-% \usepackage{eccv}
-% OPTIONAL: Un-comment the following line for a version which is easier to read
-% on small portrait-orientation screens (e.g., mobile phones, or beside other windows)
-%\usepackage[mobile]{eccv}
-% ---------------------------------------------------------------
-% Other packages
-% Commonly used abbreviations (\eg, \ie, \etc, \cf, \etal, etc.)
-\usepackage{eccvabbrv}
-% Include other packages here, before hyperref.
-\usepackage{graphicx}
-\usepackage{booktabs}
-% The "axessibility" package can be found at: https://ctan.org/pkg/axessibility?lang=en
-\usepackage[accsupp]{axessibility} % Improves PDF readability for those with disabilities.
-% ---------------------------------------------------------------
-% Hyperref package
+% CVPR 2026 Paper Template; see https://github.com/cvpr-org/author-kit
+\documentclass[10pt,twocolumn,letterpaper]{article}
+%%%%%%%%% PAPER TYPE - PLEASE UPDATE FOR FINAL VERSION
+% \usepackage{cvpr}              % To produce the CAMERA-READY version
+\usepackage[review]{cvpr}        % To produce the REVIEW version
+% \usepackage[pagenumbers]{cvpr} % To force page numbers, e.g. for an arXiv version
 % It is strongly recommended to use hyperref, especially for the review version.
-% Please disable hyperref *only* if you encounter grave issues.
-% hyperref with option pagebackref eases the reviewers' job, but should be disabled for the final version.
+% hyperref with option pagebackref eases the reviewers' job.
+% Please disable hyperref *only* if you encounter grave issues,
+% e.g. with the file validation for the camera-ready version.
 %
-% If you comment hyperref and then uncomment it, you should delete
-% main.aux before re-running LaTeX.
-% (Or just hit 'q' on the first LaTeX run, let it finish, and you
-% should be clear).
-% TODO FINAL: Comment out the following line for the camera-ready version
-%\usepackage[pagebackref,breaklinks,colorlinks,citecolor=eccvblue]{hyperref}
-% TODO FINAL: Un-comment the following line for the camera-ready version
-\usepackage{hyperref}
-\input{packages.tex}
-% Support for ORCID icon
-\usepackage{orcidlink}
+% If you comment hyperref and then uncomment it, you should delete *.aux before re-running LaTeX.
+% (Or just hit 'q' on the first LaTeX run, let it finish, and you should be clear).
+\definecolor{cvprblue}{rgb}{0.21,0.49,0.74}
+\usepackage[pagebackref,breaklinks,colorlinks,allcolors=cvprblue]{hyperref}
+\input{packages}
+%%%%%%%%% PAPER ID - PLEASE UPDATE
+\def\paperID{4792} % *** Enter the Paper ID here
+\def\confName{CVPR}
+\def\confYear{2026}
+%%%%%%%%% TITLE - PLEASE UPDATE
 \newcommand{\schemename}{\textit{ForAug}\xspace}
+\title{\schemename: Mitigating Biases and Improving Vision Transformer Training by Recombining Foregrounds and Backgrounds}
+% \title{\schemename: Mitigating Biases and Improving ViT Training by Recombining Foregrounds and Backgrounds}
+% \title{\LaTeX\ Author Guidelines for \confName~Proceedings}
+%%%%%%%%% AUTHORS - PLEASE UPDATE
+\author{
+Tobias Christian Nauen\textsuperscript{\rm 1,\rm 2},
+Brian Moser\textsuperscript{\rm 2},
+Federico Raue\textsuperscript{\rm 2},
+Stanislav Frolov\textsuperscript{\rm 2},
+Andreas Dengel\textsuperscript{\rm 1,\rm 2} \\
+\textsuperscript{\rm 1}RPTU University Kaiserslautern-Landau, Kaiserslautern, Germany \\
+\textsuperscript{\rm 2}German Research Center for Artificial Intelligence (DFKI), Kaiserslautern, Germany \\
+{\tt\small first\_second.last@dfki.de / first.last@dfki.de}
+% For a paper whose authors are all at the same institution,
+% omit the following lines up until the closing ``}''.
+% Additional authors and addresses can be added with ``\and'',
+% just like the second author.
+% To save space, use either the email address or home page, not both
+}
 \begin{document}
-% ---------------------------------------------------------------
-% \title{\schemename: Recombining Foregrounds and Backgrounds to Improve Vision Transformer Training with Bias Mitigation}
-\title{\schemename: Mitigating Biases in Image Classification via Controlled Image Compositions}
-% TODO REVIEW: If the paper title is too long for the running head, you can set
-% an abbreviated paper title here. If not, comment out.
-\titlerunning{\schemename}
-% TODO FINAL: Replace with your author list.
-% Include the authors' ORCID for the camera-ready version, if at all possible.
-\author{
-Tobias Christian Nauen\inst{1,2}\orcidlink{0000-1111-2222-3333} \and
-Brian Moser\inst{2}\orcidlink{1111-2222-3333-4444} \and
-Federico Raue\inst{2}\orcidlink{2222-3333-4444-5555} \and \\
-Stanislav Frolov\inst{2} \and
-Andreas Dengel\inst{1,2}
-}
-% TODO FINAL: Replace with an abbreviated list of authors.
-\authorrunning{T.~C.~Nauen et al.}
-% First names are abbreviated in the running head.
-% If there are more than two authors, 'et al.' is used.
-% TODO FINAL: Replace with your institution list.
-\institute{RPTU University Kaiserslautern-Landau, Kaiserslautern, Germany \and
-German Research Center for Artificial Intelligence (DFKI), Kaiserslautern, Germany\\
-\email{first\_second.last@dfki.de} / \email{first.last@dfki.de}
-}
 \maketitle
-\input{sec/abstract.tex}
-\input{sec/intro.tex}
-% \input{sec/intro_old.tex}
-\input{sec/related_work.tex}
-\input{sec/method.tex}
-\input{sec/experiments.tex}
-\input{sec/conclusion.tex}
-\input{sec/acks.tex}
-\bibliographystyle{splncs04}
-\bibliography{../JabRef/main_bib}
-% \newpage
-% \appendix
-% \input{sec/appendix.tex}
+\input{sec/abstract}
+\input{sec/intro}
+\input{sec/related_work}
+\input{sec/method}
+\input{sec/experiments}
+% \input{sec/future_work}
+\input{sec/conclusion}
+\input{sec/acks}
+{
+\small
+\bibliographystyle{ieeenat_fullname}
+\bibliography{../JabRef/main_bib}
+}
+% WARNING: do not forget to delete the supplementary pages from your submission
+% \appendix
+% \onecolumn
+% \input{sec/appendix}
 \end{document}

main_mine.tex Normal file

@@ -0,0 +1,130 @@
\documentclass[letterpaper]{article} % DO NOT CHANGE THIS
\usepackage[submission]{aaai2026} % DO NOT CHANGE THIS
\usepackage{times} % DO NOT CHANGE THIS
\usepackage{helvet} % DO NOT CHANGE THIS
\usepackage{courier} % DO NOT CHANGE THIS
\usepackage[hyphens]{url} % DO NOT CHANGE THIS
\usepackage{graphicx} % DO NOT CHANGE THIS
\urlstyle{rm} % DO NOT CHANGE THIS
\def\UrlFont{\rm} % DO NOT CHANGE THIS
\usepackage{natbib} % DO NOT CHANGE THIS AND DO NOT ADD ANY OPTIONS TO IT
\usepackage{caption} % DO NOT CHANGE THIS AND DO NOT ADD ANY OPTIONS TO IT
\frenchspacing % DO NOT CHANGE THIS
\setlength{\pdfpagewidth}{8.5in} % DO NOT CHANGE THIS
\setlength{\pdfpageheight}{11in} % DO NOT CHANGE THIS
%
% These are recommended to typeset algorithms but not required. See the subsubsection on algorithms. Remove them if you don't have algorithms in your paper.
\usepackage{algorithm}
\usepackage{algorithmic}
%
% These are are recommended to typeset listings but not required. See the subsubsection on listing. Remove this block if you don't have listings in your paper.
\usepackage{newfloat}
\usepackage{listings}
\DeclareCaptionStyle{ruled}{labelfont=normalfont,labelsep=colon,strut=off} % DO NOT CHANGE THIS
\lstset{%
basicstyle={\footnotesize\ttfamily},% footnotesize acceptable for monospace
numbers=left,numberstyle=\footnotesize,xleftmargin=2em,% show line numbers, remove this entire line if you don't want the numbers.
aboveskip=0pt,belowskip=0pt,%
showstringspaces=false,tabsize=2,breaklines=true}
\floatstyle{ruled}
\newfloat{listing}{tb}{lst}{}
\floatname{listing}{Listing}
\input{packages}
%
% Keep the \pdfinfo as shown here. There's no need
% for you to add the /Title and /Author tags.
\pdfinfo{
/TemplateVersion (2026.1)
}
% DISALLOWED PACKAGES
% \usepackage{authblk} -- This package is specifically forbidden
% \usepackage{balance} -- This package is specifically forbidden
% \usepackage{color} -- This package is specifically forbidden (if used in text)
% \usepackage{CJK} -- This package is specifically forbidden
% \usepackage{float} -- This package is specifically forbidden
% \usepackage{flushend} -- This package is specifically forbidden
% \usepackage{fontenc} -- This package is specifically forbidden
% \usepackage{fullpage} -- This package is specifically forbidden
% \usepackage{geometry} -- This package is specifically forbidden
% \usepackage{grffile} -- This package is specifically forbidden
% \usepackage{hyperref} -- This package is specifically forbidden
% \usepackage{navigator} -- This package is specifically forbidden
% (or any other package that embeds links such as navigator or hyperref)
% \usepackage{indentfirst} -- This package is specifically forbidden
% \usepackage{layout} -- This package is specifically forbidden
% \usepackage{multicol} -- This package is specifically forbidden
% \usepackage{nameref} -- This package is specifically forbidden
% \usepackage{savetrees} -- This package is specifically forbidden
% \usepackage{setspace} -- This package is specifically forbidden
% \usepackage{stfloats} -- This package is specifically forbidden
% \usepackage{tabu} -- This package is specifically forbidden
% \usepackage{titlesec} -- This package is specifically forbidden
% \usepackage{tocbibind} -- This package is specifically forbidden
% \usepackage{ulem} -- This package is specifically forbidden
% \usepackage{wrapfig} -- This package is specifically forbidden
% DISALLOWED COMMANDS
% \nocopyright -- Your paper will not be published if you use this command
% \addtolength -- This command may not be used
% \balance -- This command may not be used
% \baselinestretch -- Your paper will not be published if you use this command
% \clearpage -- No page breaks of any kind may be used for the final version of your paper
% \columnsep -- This command may not be used
% \newpage -- No page breaks of any kind may be used for the final version of your paper
% \pagebreak -- No page breaks of any kind may be used for the final version of your paper
% \pagestyle -- This command may not be used
% \tiny -- This is not an acceptable font size.
% \vspace{- -- No negative value may be used in proximity of a caption, figure, table, section, subsection, subsubsection, or reference
% \vskip{- -- No negative value may be used to alter spacing above or below a caption, figure, table, section, subsection, subsubsection, or reference
\setcounter{secnumdepth}{2} %May be changed to 1 or 2 if section numbers are desired.
% The file aaai2026.sty is the style file for AAAI Press
% proceedings, working notes, and technical reports.
%
% Title
\newcommand{\name}{\textit{ForNet}\xspace}
\newcommand{\schemename}{\textit{ForAug}\xspace}
% Names: RecombiNet, RecombNet, ReMix, ReMixNet, FoReMix/ForeMix
%%%%%%%%% TITLE - PLEASE UPDATE
\title{\schemename: Recombining Foregrounds and Backgrounds to Improve Vision Transformer Training with Bias Mitigation}
%%%%%%%%% AUTHORS - PLEASE UPDATE
\author {
Tobias Christian Nauen\textsuperscript{\rm 1, \rm 2},
Brian Moser\textsuperscript{\rm 2},
Federico Raue\textsuperscript{\rm 2},
Stanislav Frolov\textsuperscript{\rm 2},
Andreas Dengel\textsuperscript{\rm 1, \rm 2}
}
\affiliations {
\textsuperscript{\rm 1}RPTU Kaiserslautern-Landau, Kaiserslautern, Germany \\
\textsuperscript{\rm 2}German Research Center for Artificial Intelligence (DFKI), Kaiserslautern, Germany \\
{\tt\small first\_second.last@dfki.de / first.last@dfki.de}
}
\begin{document}
\maketitle
\input{sec/abstract}
\input{sec/intro}
\input{sec/related_work}
\input{sec/method}
\input{sec/experiments}
% \input{sec/future_work}
\input{sec/conclusion}
\input{sec/acks}
\bibliography{../JabRef/main_bib}
% \newpage
% \onecolumn
% \appendix
% \input{sec/appendix}
\newpage
\input{sec/reproducability.tex}
\end{document}

preamble.tex Normal file

@@ -0,0 +1,51 @@
%% This file contains a number of tweaks that are typically applied to the main document.
%% They are not enabled by default, but can be enabled by uncommenting the relevant lines.
%%
%% Inline annotations; for predefined colors, refer to "dvipsnames" in the xcolor package:
%% https://tinyurl.com/overleaf-colors
%%
\newcommand{\red}[1]{{\color{red}#1}}
\newcommand{\todo}[1]{{\color{red}#1}}
\newcommand{\TODO}[1]{\textbf{\color{red}[TODO: #1]}}
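%%
%% Example usage (illustrative): \todo{check this number} prints the note
%% inline in red, while \TODO{add citation} renders a bold red
%% ``[TODO: add citation]'' marker in the text.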
%%
%% disable for camera ready / submission by uncommenting these lines
%%
% \renewcommand{\TODO}[1]{}
% \renewcommand{\todo}[1]{#1}
%%
%% work harder in optimizing text layout. Typically shrinks text by 1/6 of page, enable
%% it at the very end of the writing process, when you are just above the page limit
%%
% \usepackage{microtype}
%%
%% fine-tune paragraph spacing
%%
% \renewcommand{\paragraph}[1]{\vspace{.5em}\noindent\textbf{#1.}}
%%
%% globally adjusts space between figure and caption
%%
% \setlength{\abovecaptionskip}{.5em}
%%
%% Allows "the use of \paper to refer to the project name"
%% with automatic management of space at the end of the word
%%
% \usepackage{xspace}
% \newcommand{\paper}{ProjectName\xspace}
%%
%% Commonly used math definitions
%%
% \DeclareMathOperator*{\argmin}{arg\,min}
% \DeclareMathOperator*{\argmax}{arg\,max}
%%
%% Tighten underline
%%
% \usepackage{soul}
% \setuldepth{foobar}

rebuttal.tex Normal file

@@ -0,0 +1,137 @@
\documentclass[10pt,twocolumn,letterpaper]{article}
\usepackage[rebuttal]{cvpr}
% Include other packages here, before hyperref.
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{booktabs}
% Import additional packages in the preamble file, before hyperref
\input{preamble}
% If you comment hyperref and then uncomment it, you should delete
% egpaper.aux before re-running latex. (Or just hit 'q' on the first latex
% run, let it finish, and you should be clear).
\definecolor{cvprblue}{rgb}{0.21,0.49,0.74}
\usepackage[pagebackref,breaklinks,colorlinks,allcolors=cvprblue]{hyperref}
% If you wish to avoid re-using figure, table, and equation numbers from
% the main paper, please uncomment the following and change the numbers
% appropriately.
%\setcounter{figure}{2}
%\setcounter{table}{1}
%\setcounter{equation}{2}
% If you wish to avoid re-using reference numbers from the main paper,
% please uncomment the following and change the counter value to the
% number of references you have in the main paper (here, 100).
%\makeatletter
%\apptocmd{\thebibliography}{\global\c@NAT@ctr 100\relax}{}{}
%\makeatother
%%%%%%%%% PAPER ID - PLEASE UPDATE
\def\paperID{*****} % *** Enter the Paper ID here
\def\confName{CVPR}
\def\confYear{2026}
\begin{document}
%%%%%%%%% TITLE - PLEASE UPDATE
\title{\LaTeX\ Guidelines for Author Response} % **** Enter the paper title here
\maketitle
\thispagestyle{empty}
\appendix
%%%%%%%%% BODY TEXT - ENTER YOUR RESPONSE BELOW
\section{Introduction}
After receiving paper reviews, authors may optionally submit a rebuttal to address the reviewers' comments, which will be limited to a {\bf one page} PDF file.
Please follow the steps and style guidelines outlined below for submitting your author response.
The author rebuttal is optional and, following similar guidelines to previous conferences, is meant to provide you with an opportunity to rebut factual errors or to supply additional information requested by the reviewers.
It is NOT intended to add new contributions (theorems, algorithms, experiments) that were absent in the original submission and NOT specifically requested by the reviewers.
You may optionally add a figure, graph, or proof to your rebuttal to better illustrate your answer to the reviewers' comments.
Per a passed 2018 PAMI-TC motion, reviewers should refrain from requesting significant additional experiments for the rebuttal or penalize for lack of additional experiments.
Authors should refrain from including new experimental results in the rebuttal, especially when not specifically requested to do so by the reviewers.
Authors may include figures with illustrations or comparison tables of results reported in the submission/supplemental material or in other papers.
Just like the original submission, the rebuttal must maintain anonymity and cannot include external links that reveal the author identity or circumvent the length restriction.
The rebuttal must comply with this template (the use of sections is not required, though it is recommended to structure the rebuttal for ease of reading).
%-------------------------------------------------------------------------
\subsection{Response length}
Author responses must be no longer than 1 page in length including any references and figures.
Overlength responses will simply not be reviewed.
This includes responses where the margins and formatting are deemed to have been significantly altered from those laid down by this style guide.
Note that this \LaTeX\ guide already sets figure captions and references in a smaller font.
%------------------------------------------------------------------------
\section{Formatting your Response}
{\bf Make sure to update the paper title and paper ID in the appropriate place in the tex file.}
All text must be in a two-column format.
The total allowable size of the text area is $6\frac78$ inches (17.46 cm) wide by $8\frac78$ inches (22.54 cm) high.
Columns are to be $3\frac14$ inches (8.25 cm) wide, with a $\frac{5}{16}$ inch (0.8 cm) space between them.
The top margin should begin 1 inch (2.54 cm) from the top edge of the page.
The bottom margin should be $1\frac{1}{8}$ inches (2.86 cm) from the bottom edge of the page for $8.5 \times 11$-inch paper;
for A4 paper, approximately $1\frac{5}{8}$ inches (4.13 cm) from the bottom edge of the page.
Please number any displayed equations.
It is important for readers to be able to refer to any particular equation.
Wherever Times is specified, Times Roman may also be used.
Main text should be in 10-point Times, single-spaced.
Section headings should be in 10 or 12 point Times.
All paragraphs should be indented 1 pica (approx.~$\frac{1}{6}$ inch or 0.422 cm).
Figure and table captions should be 9-point Roman type as in \cref{fig:onecol}.
List and number all bibliographical references in 9-point Times, single-spaced,
at the end of your response.
When referenced in the text, enclose the citation number in square brackets, for example~\cite{Alpher05}.
Where appropriate, include the name(s) of editors of referenced books.
\begin{figure}[t]
\centering
\fbox{\rule{0pt}{0.5in} \rule{0.9\linewidth}{0pt}}
%\includegraphics[width=0.8\linewidth]{egfigure.eps}
\caption{Example of caption. It is set in Roman so that mathematics
(always set in Roman: $B \sin A = A \sin B$) may be included without an
ugly clash.}
\label{fig:onecol}
\end{figure}
To avoid ambiguities, it is best if the numbering for equations, figures, tables, and references in the author response does not overlap with that in the main paper (the reviewer may wonder if you talk about \cref{fig:onecol} in the author response or in the paper).
See \LaTeX\ template for a workaround.
%-------------------------------------------------------------------------
\subsection{Illustrations, graphs, and photographs}
All graphics should be centered.
Please ensure that any point you wish to make is resolvable in a printed copy of the response.
Resize fonts in figures to match the font in the body text, and choose line widths which render effectively in print.
Readers (and reviewers), even of an electronic copy, may choose to print your response in order to read it.
You cannot insist that they do otherwise, and therefore must not assume that they can zoom in to see tiny details on a graphic.
When placing figures in \LaTeX, it is almost always best to use \verb+\includegraphics+, and to specify the figure width as a multiple of the line width as in the example below
{\small\begin{verbatim}
\usepackage{graphicx} ...
\includegraphics[width=0.8\linewidth]
{myfile.pdf}
\end{verbatim}
}
%%%%%%%%% REFERENCES
{
\small
\bibliographystyle{ieeenat_fullname}
\bibliography{main}
}
\end{document}

sec/abstract.tex

@@ -1,27 +1,19 @@
% !TeX root = ../main.tex
\begin{abstract}
Transformers, particularly Vision Transformers (ViTs), have achieved state-of-the-art performance in large-scale image classification.
However, they often require large amounts of data and can exhibit biases, such as center or size bias, that limit their robustness and generalizability.
This paper introduces \schemename, a novel data augmentation operation that addresses these challenges by explicitly imposing invariances into the training data, which are otherwise part of the neural network architecture.
\schemename is constructed by using pretrained foundation models to separate and recombine foreground objects with different backgrounds.
This recombination step enables us to take fine-grained control over object position and size, as well as background selection.
We demonstrate that using \schemename significantly improves the accuracy of ViTs and other architectures by up to 4.5 percentage points (p.p.) on ImageNet, which translates to 7.3 p.p. on downstream tasks.
Importantly, \schemename not only improves accuracy but also opens new ways to analyze model behavior and quantify biases.
Namely, we introduce metrics for background robustness, foreground focus, center bias, and size bias and show that using \schemename during training substantially reduces these biases.
In summary, \schemename provides a valuable tool for analyzing and mitigating biases, enabling the development of more robust and reliable computer vision models.
Our code and dataset are publicly available at \code{<url>}.
\end{abstract}

sec/appendix.tex

@@ -1,101 +1,6 @@
% !TeX root = ../supplementary.tex
\section{Training Setup}
\label{sec:training_setup}
\begin{table*}[h!]
\centering
\caption{Training setup and hyperparameters for our ImageNet training.}
\label{tab:in-setup}
\resizebox{\textwidth}{!}{
\begin{tabular}{lccc}
\toprule
Augmentation Pipeline: & Basic & 3-Augment~\cite{Touvron2022} & RandAugment~\cite{Touvron2021b} \\
\midrule
Image Resolution & \multicolumn{3}{c}{$224 \times 224$} \\
Epochs & \multicolumn{3}{c}{300} \\
Learning Rate & S/B: 1e-3, L: 5e-4 & 3e-3 & S/B: 1e-3, L: 5e-4 \\
Learning Rate Schedule & \multicolumn{3}{c}{cosine decay} \\
Batch Size & 1024 & 2048 & 1024 \\
GPUs & \multicolumn{3}{c}{$4\times$ NVIDIA A100/H100/H200} \\
Warmup Schedule & \multicolumn{3}{c}{linear} \\
Warmup Epochs & \multicolumn{3}{c}{3} \\
Weight Decay & 0.05 & 0.02 & 0.05 \\
Label Smoothing & \multicolumn{3}{c}{0.1} \\
Optimizer & AdamW & Lamb \cite{You2020} & AdamW \\
\midrule
Augmentations & \makecell{RandomResizedCrop \\ Horizontal Flip \\ ColorJitter} & \makecell{Resize \\ RandomCrop \\ Horizontal Flip \\ Grayscale \\ Solarize \\ Gaussian-Blur \\ Color Jitter} & \makecell{RandomResizedCrop \\ Horizontal Flip \\ RandomErase \cite{Zhong2020} \\ RandAugment \cite{Cubuk2020} \\ Color Jitter} \\
\bottomrule
\end{tabular}
}
\end{table*}
\begin{table}[h!]
\centering
\caption{Training setup for finetuning on different downstream datasets. Other settings are the same as in \Cref{tab:in-setup}. For finetuning, we always utilize 3-Augment and the related hyperparameters from \Cref{tab:in-setup}.}
\label{tab:downstream-setup}
\begin{tabular}{lcccc}
\toprule
Dataset & Batch Size & Epochs & Learning Rate & Num. GPUs \\
\midrule
Aircraft & 512 & 500 & 3e-4 & 2 \\
Cars & 1024 & 500 & 3e-4 & 4 \\
Flowers & 256 & 500 & 3e-4 & 1 \\
Food & 2048 & 100 & 3e-4 & 4 \\
Pets & 512 & 500 & 3e-4 & 2 \\
\bottomrule
\end{tabular}
\end{table}
On ImageNet, we test three different data augmentation pipelines and hyperparameter settings, as shown in \Cref{tab:in-setup}: a basic pipeline, a RandAugment pipeline based on the DeiT~\cite{Touvron2021b} setup, and 3-Augment, as used in \cite{Touvron2022,Nauen2025}.
When comparing different architectures, ViT, Swin, and ResNet are trained with the 3-Augment pipeline and DeiT is trained with the RandAugment pipeline.
% On ImageNet we use the same training setup as \cite{Nauen2025} and \cite{Touvron2022} without pretraining for ViT, Swin, and ResNet.
% For DeiT, we train the same ViT architecture but using the data augmentation scheme and hyperparameters from \cite{Touvron2021b}.
As our focus is on evaluating the changes in accuracy due to \schemename, like \cite{Nauen2025}, we stick to one set of hyperparameters for all models.
We list the settings used for training on ImageNet in \Cref{tab:in-setup} and the ones used for finetuning those weights on the downstream datasets in \Cref{tab:downstream-setup}.
Our implementation uses PyTorch \cite{Paszke2019} and the \emph{timm} library \cite{Wightman2019} for model architectures and basic functionality.
\begin{table*}[ht!]
\centering
\caption{Hardware and Software specifics used for both training and evaluation.}
\label{tab:hw-sw-versions}
\begin{tabular}{ll}
\toprule
Parameter & Value \\
\midrule
GPU & $4 \times$ NVIDIA A100/H100/H200 \\
CPU & 24 CPU cores (Intel Xeon) per GPU \\
Memory & up to 120 GB per GPU \\
Operating System & Enroot container for SLURM based on Ubuntu 24.04 LTS \\
Python & 3.12.3 \\
PyTorch & 2.7.0 \\
TorchVision & 0.22.0 \\
Timm & 1.0.15 \\
\bottomrule
\end{tabular}
\end{table*}
\Cref{tab:hw-sw-versions} lists the specific hardware we use, as well as versions of the relevant software packages.
\section{Resource Usage of \schemename}
To utilize the proposed \schemename, specific computational resources are necessary, particularly for computing and storing the output of the segmentation stage and for the on-the-fly processing of the recombination stage.
\paragraph{Segmentation.}
% While calculating the segmentations and infills takes a lot of compute, this is effort that has to be spent only once per dataset.
\schemename involves a computationally expensive segmentation and infill stage, which is a one-time calculation per dataset.
Once computed, the segmentation and infill results can be perpetually reused, amortizing the initial cost over all subsequent experiments and applications.
On NVIDIA H100 GPUs, the segmentation stage processes $374.3 \frac{\text{img}}{\text{GPU} \times \text{h}}$ when using Attentive Eraser or $5338.6 \frac{\text{img}}{\text{GPU} \times \text{h}}$ for LaMa.
For ImageNet, this comes down to just under 9 days (Attentive Eraser) or roughly 16 hours (LaMa) on two 8-GPU nodes.
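For reference, these wall-clock numbers follow directly from the size of ImageNet ($1\,281\,167$ training images, cf.~\Cref{tab:dataset-stats}) and the per-GPU rates above; for Attentive Eraser on two 8-GPU nodes:
\begin{equation*}
    \frac{1\,281\,167~\text{img}}{374.3~\frac{\text{img}}{\text{GPU} \times \text{h}} \times 16~\text{GPU}} \approx 214~\text{h} \approx 8.9~\text{days},
\end{equation*}
and analogously $\approx 15$~h for LaMa.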
To facilitate immediate use and reproduction of results, we publicly provide the precalculated segmentation stage output for the ImageNet dataset for download\footnote{Link will go here.}.
On ImageNet, the output of \schemename's segmentation step requires 73 GB of additional disk space, on top of the 147 GB of the base dataset.
\paragraph{Recombination.}
The recombination step of \schemename is implemented as a data loader operation.
It is thus offloaded to the CPU, where it can be heavily parallelized, resulting in only a minor increase in the training step-time.
For example, using a ViT-B model on an NVIDIA A100 GPU, the average update step-time increased by $1\%$, from $528 \pm 2$ ms to $534 \pm 1$ ms.
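To make this concrete, the following is a minimal sketch of such a CPU-side recombination transform using PIL; it is illustrative rather than our exact implementation, and all names, defaults, and the uniform position sampling (the actual position is drawn from the extended Bates distribution described below) are placeholders:
\begin{verbatim}
import random
from PIL import Image, ImageFilter

def recombine(fg_rgba, bg, infill_ratio, t_prune=0.8,
              size_range=(0.2, 0.8), p_smooth=0.5, sigma_max=4.0):
    # Background pruning: skip mostly-inpainted backgrounds.
    if infill_ratio >= t_prune:
        raise ValueError("background exceeds the pruning threshold")
    bg = bg.copy()
    # Foreground size: sampled from a range, not a fixed mean size.
    scale = random.uniform(*size_range)
    side = max(1, int(scale * min(bg.size)))
    w, h = fg_rgba.size
    fg = fg_rgba.resize((side, max(1, side * h // w)))
    # Edge smoothing: blur the alpha mask with probability p_smooth.
    mask = fg.getchannel("A")
    if random.random() < p_smooth:
        mask = mask.filter(
            ImageFilter.GaussianBlur(random.uniform(0.0, sigma_max)))
    # Position: uniform here; ForAug samples it from the extended
    # Bates distribution of the next section.
    x = random.randint(0, max(0, bg.width - fg.width))
    y = random.randint(0, max(0, bg.height - fg.height))
    bg.paste(fg, (x, y), mask)
    return bg  # the standard augmentation pipeline is applied afterwards
\end{verbatim}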
\section{Extended Bates Distribution}
\label{apdx:bates-distribution}
\begin{figure}[h!]
\centering
\includegraphics[width=.5\columnwidth]{img/bates.pdf}
@@ -103,6 +8,27 @@ For example, using a ViT-B model on an NVIDIA A100 GPU, the average update step-
\label{fig:bates-pdf}
\end{figure}
% Finally, we analyze the foreground object's positioning in the image.
% We utilize an extended Bates distribution to sample the position of the foreground object.
% The Bates distribution~\cite{Bates1955} with parameter $\eta \geq 1$ is the mean of $\eta$ independent uniformly distributed random variables \cite{Jonhson1995}.
% Therefore, the larger $\eta$, the more concentrated the distribution is around the center.
% We extend this concept to $\eta \leq -1$ by shifting the distribution away from the center and towards the edges.
% We extend this concept to $\eta \leq -1$ by defining
% \begin{align*}
% X \sim \text{Bates}(\eta) :\Leftrightarrow s(X) \sim \text{Bates}(-\eta)
% \end{align*}
% for $\eta \leq -1$ with $s$ being the sawtooth function on $[0, 1]$:
% \begin{align}
% s(x) = \begin{cases}
% x + 0.5 & \text{if } 0 < x < 0.5 \\
% x - 0.5 & \text{if } 0.5 \leq x \leq 1
% \end{cases}
% \end{align}
% Note that $s \circ s = \id$ on $[0, 1]$.
% This way, distributions with $\eta \leq -1$ are more concentrated around the borders.
% $\eta = 1$ and $\eta = -1$ both correspond to the uniform distribution.
% The PDF of this extended Bates distribution is visualized in \Cref{fig:bates-pdf}.
We introduce an extension of the Bates distribution~\cite{Bates1955} to include negative parameters, enabling sampling of foreground object positions away from the image center.
The standard Bates distribution, for $\eta \in \N$, is defined as the mean of $\eta$ independent random variables drawn from a uniform distribution \cite{Jonhson1995}.
A larger $\eta$ value increases the concentration of samples around the distribution's mean, which in this case is the image center.
@@ -125,304 +51,98 @@ This transformation inverts the distribution's concentration, shifting the proba
We visualize the distribution function of the extended Bates distribution in \Cref{fig:bates-pdf}.
Both $\eta = 1$ and $\eta = -1$ result in a uniform distribution across the image.
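For clarity, a direct NumPy transcription of this definition reads as follows (a sketch; using one independent draw per image axis is an assumption on our part):
\begin{verbatim}
import numpy as np

def extended_bates(eta, size=1, rng=None):
    # Extended Bates distribution on [0, 1]:
    #   eta >= 1: mean of eta i.i.d. uniforms (mass concentrated at 0.5),
    #   eta <= -1: sawtooth-transformed Bates(-eta) (mass at the borders),
    #   eta = 1 and eta = -1 both yield the uniform distribution.
    assert abs(eta) >= 1
    rng = rng or np.random.default_rng()
    x = rng.uniform(size=(abs(eta), size)).mean(axis=0)  # Bates(|eta|)
    if eta < 0:
        # Sawtooth s(x) = x + 0.5 for x < 0.5, x - 0.5 otherwise
        # (note s composed with s is the identity on [0, 1]).
        x = np.where(x < 0.5, x + 0.5, x - 0.5)
    return x
\end{verbatim}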
\section{Design Choices of \schemename}
\label{sec:ablation}
We start by ablating the design choices of \schemename on TinyImageNet~\cite{Le2015}, a subset of ImageNet containing 200 categories with 500 images each.
\Cref{tab:ablation-segment} presents ablations for segmentation and \Cref{tab:ablation-recombine} for recombination.
\begin{table}
\caption{Ablation of the design decisions in the segmentation phase of \schemename on TinyImageNet.
The first line is our baseline, while the other lines are using \schemename.
We use basic settings with the \emph{same} background strategy during recombination for this experiment.
}
\label{tab:ablation-segment}
\centering
\small
\begin{tabular}{llcc}
\toprule
\multirow{2.5}{*}{\makecell{Detect. \\Prompt}} & \multirow{2.5}{*}{\makecell{Infill \\ Model}} & \multicolumn{2}{c}{TinyImageNet Accuracy [\%]} \\
\cmidrule{3-4}
& & ViT-Ti & ViT-S \\
\midrule
\multicolumn{2}{l}{\textbf{TinyImageNet}} & $66.1 \pm 0.5$ & $68.3 \pm 0.7$ \\
specific & LaMa \cite{Suvorov2022} & $65.5 \pm 0.4$ & $71.2 \pm 0.5$ \\
general & \gtxt{LaMa \cite{Suvorov2022}} & $66.4 \pm 0.6$ & $72.9 \pm 0.6$ \\
\gtxt{general} & Att. Eraser \cite{Sun2025} & $67.5 \pm 1.2$ & $72.4 \pm 0.5$ \\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[t]
\caption{Ablation of the recombination phase of \schemename on TinyImageNet (top) and ImageNet (bottom). The first experiments use the initial segmentation settings with LaMa \cite{Suvorov2022}.}
\label{tab:ablation-recombine}
\centering
\begin{tabular}{cccccccc}
\toprule
\multirow{2.5}{*}{\makecell{FG. \\size}} & \multirow{2.5}{*}{\makecell{Augment.\\Order}} & \multirow{2.5}{*}{\makecell{BG\\Strat.}} & \multirow{2.5}{*}{\makecell{BG.\\Prune}} & \multirow{2.5}{*}{\makecell{Original\\Mixing}} & \multirow{2.5}{*}{\makecell{Edge\\Smooth.}} & \multicolumn{2}{c}{Accuracy [\%]} \\
\cmidrule{7-8}
& & & & & & ViT-Ti & ViT-S \\
\midrule
\multicolumn{6}{l}{\textbf{TinyImageNet}} & \gtxt{$66.1\pm0.5$} & \gtxt{$68.3\pm0.7$} \\
mean & crop$\to$paste & same & - & - & \gtxt{-} & $64.6\pm0.5$ & $70.0\pm0.6$ \\
range & \gtxt{crop$\to$paste} & \gtxt{same} & \gtxt{-} & \gtxt{-} & \gtxt{-} & $65.5\pm0.4$ & $71.2\pm0.5$ \\
\midrule
{range} & {crop$\to$paste} & {same} & {-} & {-} & {-} & $67.5\pm1.2$ & $72.4\pm0.5$ \\
\gtxt{range} & paste$\to$crop & \gtxt{same} & \gtxt{-} & \gtxt{-} & \gtxt{-} & $67.1\pm1.2$ & $72.9\pm0.5$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & 1.0 & \gtxt{-} & \gtxt{-} & $67.0\pm1.2$ & $73.0\pm0.3$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & 0.8 & \gtxt{-} & \gtxt{-} & $67.2\pm1.2$ & $72.9\pm0.8$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & 0.6 & \gtxt{-} & \gtxt{-} & $67.5\pm1.0$ & $72.8\pm0.7$ \\
% \gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & $\sigma_\text{max} = 2.0$ & \gtxt{-} & $67.2\pm0.4$ & $72.9\pm0.5$ \\
% \gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & $\sigma_\text{max} = 4.0$ & \gtxt{-} & $65.9\pm0.5$ & $72.4\pm0.6$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & $p=0.2$ & \gtxt{-} & $69.8\pm0.5$ & $75.0\pm0.3$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & $p=0.33$ & \gtxt{-} & $69.5\pm0.4$ & $75.2\pm1.0$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & $p=0.5$ & \gtxt{-} & $70.3\pm1.0$ & $74.2\pm0.2$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & linear & \gtxt{-} & $70.1\pm0.7$ & $74.9\pm0.8$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & reverse lin. & \gtxt{-} & $67.6\pm0.2$ & $73.2\pm0.3$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & cos & \gtxt{-} & $71.3\pm1.0$ & $75.7\pm0.8$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & \gtxt{cos} & $\sigma_\text{max} = 4.0$ & $70.0\pm0.8$ & $75.5\pm0.7$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & orig. & \gtxt{0.8} & \gtxt{cos} & \gtxt{$\sigma_\text{max} = 4.0$} & $67.2\pm0.9$ & $69.9\pm1.0$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & all & \gtxt{0.8} & \gtxt{cos} & \gtxt{$\sigma_\text{max} = 4.0$} & $70.1\pm0.7$ & $77.5\pm0.6$ \\
\midrule
\multicolumn{6}{l}{\textbf{ImageNet}} & \gtxt{-} & \gtxt{$79.1\pm0.1$} \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & \gtxt{cos} & \gtxt{-} & - & $80.5\pm0.1$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & \gtxt{cos} & $\sigma_\text{max} = 4.0$ & - & $80.7\pm0.1$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & all & \gtxt{0.8} & \gtxt{cos} & \gtxt{$\sigma_\text{max} = 4.0$} & - & $81.4\pm0.1$ \\
\bottomrule
\end{tabular}
\end{table}
\textbf{Prompt.}
First, we evaluate the type of prompt used to detect the foreground object.
Here, the \emph{general} prompt, which contains the class and the more general object category, outperforms only having the class name (\emph{specific}).
\textbf{Inpainting.} Among inpainting models, Attentive Eraser~\cite{Sun2025} produces slightly better results than LaMa~\cite{Suvorov2022} ($+0.5$ p.p. on average).
For inpainting examples, see \Cref{tab:infill-examples}.
\textbf{Foreground size} significantly impacts performance.
Employing a \emph{range} of sizes during recombination, rather than a fixed \emph{mean} size, boosts accuracy by approximately 1 p.p., suggesting that the added variability is beneficial.
\textbf{Order of data augmentation.}
Applying all augmentations after foreground-background recombination (\emph{paste$\to$crop$\to$color}) improves the performance of ViT-S compared to applying the crop-related augmentations before pasting (\emph{crop$\to$paste$\to$color}); ViT-Ti results are ambiguous.
\textbf{Background pruning.}
For the choice of backgrounds, we test different pruning thresholds ($t_\text{prune}$) to exclude backgrounds with large inpainted regions.
A threshold of $t_\text{prune}=1.0$ means that we use all backgrounds that are not fully infilled.
Varying $t_\text{prune}$ has minimal impact; we choose $t_\text{prune} = 0.8$ to exclude predominantly artificial backgrounds.
\textbf{Mixing} \schemename-augmented samples with the original ImageNet data proves crucial.
While constant and linear mixing schedules improve performance by $2-3$ p.p. over training on augmented samples only, the cosine annealing schedule proves optimal, boosting accuracy by $3-4$ p.p.
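A sketch of these schedules as a per-epoch probability of drawing a recombined sample follows; that the schedules anneal from recombined towards original samples, rather than the reverse, is our reading of \Cref{tab:ablation-recombine} and not stated explicitly:
\begin{verbatim}
import math

def p_recombined(epoch, total_epochs, schedule="cos", p_const=0.5):
    # Probability of drawing a ForAug-recombined sample at this epoch;
    # the remaining samples come from the original ImageNet data.
    t = epoch / max(1, total_epochs - 1)  # training progress in [0, 1]
    if schedule == "constant":            # the p = 0.2 / 0.33 / 0.5 rows
        return p_const
    if schedule == "linear":
        return 1.0 - t
    if schedule == "reverse linear":
        return t
    if schedule == "cos":                 # cosine annealing, 1 -> 0
        return 0.5 * (1.0 + math.cos(math.pi * t))
    raise ValueError(schedule)
\end{verbatim}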
\textbf{Edge smoothing.}
We evaluate the impact of using Gaussian blurring to smooth the edges of the foreground masks.
% Similarly, applying edge smoothing to foreground masks with Gaussian blurring actually hurts performance on Tiny\name, but slightly improves it on \name.
For larger models, this gives a slight performance boost on the full ImageNet (second to last line in \Cref{tab:ablation-recombine}).
\textbf{Background strategy.}
Another design decision is the allowed choice of background image for each foreground object.
% We evaluate three different strategies.
% (1) Picking the background from which that specific foreground was originally extracted.
% The major difference to ImageNet when using this setup is the variability in size and position of the foreground object.
% (2) Picking a background that originally had a foreground object of the same class in it.
% Here, we have backgrounds where objects of this type can typically appear while also creating a wider variety of samples due to pairing each foreground object with different backgrounds each time.
% (3) Picking any background.
% This choice has the largest variety of backgrounds, but the backgrounds are not semantically related to the foreground object anymore.
% We find in \Cref{fig:bg-strategy} that choosing only a foreground's original background is the worst choice.
We compare using the original background, a background from the same class, and any background.
These strategies go from low diversity and high shared information content between the foreground and background to high diversity and low shared information content.
For \emph{ViT-Ti}, the latter two strategies perform comparably, while \emph{ViT-S} benefits from the added diversity of using any background.
The same is true when training on the full ImageNet.
\begin{table}
\caption{Accuracy of ViT-S on TinyImageNet (TIN) in percent using \schemename with different foreground position distributions by varying the Bates parameter $\eta$.
The best performance is achieved when using the uniform distribution ($\eta=1$) for training.}
\label{tbl:foreground-eta}
\centering
\small
\begin{tabular}{ccccccc}
\toprule
\multirow{2.5}{*}{\makecell{Bates Parameter \\during training}} & \multirow{2.5}{*}{\makecell{TIN \\w/o \schemename}} & \multicolumn{5}{c}{TIN w/ \schemename} \\
\cmidrule(l){3-7}
& & $\eta=-3$ & $-2$ & $1/-1$ & $2$ & $3$ \\
\midrule
Baseline & 68.9 & 60.5 & 60.2 & 60.8 & 62.6 & 63.1 \\
$\eta=-3$ & 71.3 & 79.3 & 79.5 & 79.1 & 79.3 & 79.1 \\
$\eta=-2$ & 71.5 & 80.0 & 78.7 & 79.3 & 79.1 & 78.8 \\
$\eta=1/-1$ & 72.3 & 79.5 & 78.9 & 80.2 & 79.7 & 80.4 \\
$\eta=2$ & 71.3 & 78.2 & 77.8 & 79.1 & 79.6 & 79.9 \\
$\eta=3$ & 71.4 & 77.2 & 76.9 & 78.6 & 79.6 & 79.7 \\
\bottomrule
\end{tabular}
\end{table}
\textbf{Foreground position.}
Finally, we analyze the foreground object's positioning in the image, using a
generalization of the Bates distribution~\cite{Bates1955} with parameter $\eta \in \Z$ (see \Cref{apdx:bates-distribution}).
The Bates distribution presents an easy way to sample from a bounded domain with just one hyperparameter that controls its concentration.
$\eta = 1/-1$ corresponds to the uniform distribution; $\eta > 1$ concentrates the distribution around the center; and for $\eta < -1$, the distribution is concentrated at the borders (see supplementary material for details).
% We utilize an extended Bates distribution to sample the position of the foreground object.
% The Bates distribution with parameter $\eta \geq 1$ is the mean of $\eta$ independent uniformly distributed random variables \cite{Jonhson1995}.
% The larger $\eta$, the more concentrated the distribution is at the center, $\eta < -1$ concentrates the distribution at the edges.
% We extend this concept to $\eta \leq -1$, shifting the distribution away from the center and towards the edges.
When sampling more towards the center of the image, the difficulty of the task is reduced, which reduces performance on TinyImageNet (\Cref{tbl:foreground-eta}).
This is reflected in the performance when evaluating using \schemename with $\eta=2$ and $\eta=3$ compared to $\eta=-1/1$.
We observe a similar reduction for $\eta < -1$.
% This experiment is conducted using the LaMa infill model.
\begin{table}[t]
\caption{Dataset statistics for TinyImageNet and ImageNet with and without \schemename. For \schemename we report the number of foreground/background pairs.}
\label{tab:dataset-stats}
\centering
% \resizebox{.5\columnwidth}{!}{
\begin{tabular}{l S[table-format=4.0] S[table-format=7.0] S[table-format=5.0]}
\toprule
Dataset & {Classes} & {\makecell{Training \\ Images}} & {\makecell{Validation \\ Images}} \\
\midrule
TinyImageNet & 200 & 100000 & 10000 \\
TinyImageNet + \schemename & 200 & 99404 & 9915 \\
ImageNet & 1000 & 1281167 & 50000 \\
ImageNet + \schemename & 1000 & 1274557 & 49751 \\
\bottomrule
\end{tabular}
% }
\end{table}
After fixing the optimal design parameters in \Cref{tab:ablation-segment,tab:ablation-recombine} (last rows), we run \schemename's segmentation step on the entire ImageNet dataset.
\Cref{tab:dataset-stats} shows the resulting dataset statistics.
% The slightly lower number of images in \name is due to \emph{Grounded SAM} returning no or invalid detections for some images.
The slightly reduced image count for \schemename is due to instances where Grounded SAM fails to produce valid segmentation masks.
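For reference, the structure of this one-time stage can be sketched as follows; \texttt{detect\_and\_segment} and \texttt{inpaint} are hypothetical stand-ins for Grounded SAM and the infill model (LaMa / Attentive Eraser), not their real APIs:
\begin{verbatim}
import numpy as np

def detect_and_segment(image, prompt):
    raise NotImplementedError  # hypothetical wrapper around Grounded SAM

def inpaint(image, mask):
    raise NotImplementedError  # hypothetical wrapper around the infill model

def segmentation_stage(image, class_name, category):
    # ``general'' prompt: the class name plus its more general category.
    mask = detect_and_segment(image, f"{class_name}. {category}.")
    if mask is None:  # no valid detection: the image is dropped
        return None
    rgba = np.dstack([image, 255 * mask.astype(np.uint8)])  # foreground
    background = inpaint(image, mask)  # object removed and infilled
    return rgba, background, float(mask.mean())  # mask.mean(): infill ratio
\end{verbatim}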
\section{Robustness Evaluation on Corner-Cases}
\begin{table}[t]
\centering
\caption{Evaluation on the Corner-Cases dataset. Objects cut from ImageNet evaluation bounding boxes are pasted onto infilled backgrounds. Objects have three sizes: $56$px, $84$px, and $112$px. Objects are placed in the center (CeX) or corner (CoX) of an image, on its original background (XxO) or a random background (XxR).}
\label{tab:corner-cases}
\resizebox{\textwidth}{!}{
\begin{tabular}{lcccccccccccccc}
\toprule
\multirow{4}{*}{Model} & \multirow{4}{*}{w/ \schemename} & \multicolumn{12}{c}{Corner Cases Accuracy [\%]} \\
\cmidrule(l){3-14}
& & \multicolumn{4}{c}{56} & \multicolumn{4}{c}{84} & \multicolumn{4}{c}{112} \\
\cmidrule(lr){3-6} \cmidrule(lr){7-10} \cmidrule(l){11-14}
& & CeO & CoO & CeR & CoR & CeO & CoO & CeR & CoR & CeO & CoO & CeR & CoR \\
\midrule
ViT-S & \xmark & $40.5 \pm 2.0$ & $28.6 \pm 0.8$ & $10.3 \pm 0.9$ & $6.4 \pm 0.2$ & $56.8 \pm 1.2$ & $47.6 \pm 1.0$ & $31.3 \pm 0.7$ & $25.5 \pm 0.5$ & $70.9 \pm 0.1$ & $66.9 \pm 1.6$ & $55.2 \pm 0.2$ & $51.1 \pm 0.8$ \\
ViT-S & \cmark & $49.4 \pm 0.6$ & $39.9 \pm 0.5$ & $22.7 \pm 0.4$ & $17.6 \pm 0.3$ & $66.3 \pm 0.3$ & $60.0 \pm 0.3$ & $47.7 \pm 0.7$ & $43.2 \pm 0.2$ & $76.5 \pm 0.2$ & $74.9 \pm 0.4$ & $66.8 \pm 0.6$ & $64.9 \pm 0.1$ \\
& & \grntxt{$+8.9$} & \grntxt{$+11.3$} & \grntxt{$+12.4$} & \grntxt{$+11.2$} & \grntxt{$+9.4$} & \grntxt{$+12.4$} & \grntxt{$+16.4$} & \grntxt{$+17.7$} & \grntxt{$+5.6$} & \grntxt{$+8.0$} & \grntxt{$+11.6$} & \grntxt{$+13.7$} \\
\cmidrule(r){1-2}
ViT-B & \xmark & $37.9 \pm 1.4$ & $29.3 \pm 0.7$ & $14.0 \pm 1.7$ & $11.9 \pm 1.1$ & $51.5 \pm 0.7$ & $45.0 \pm 0.8$ & $27.3 \pm 0.8$ & $26.3 \pm 0.8$ & $64.7 \pm 0.3$ & $61.8 \pm 0.6$ & $46.3 \pm 0.3$ & $45.5 \pm 0.5$ \\
ViT-B & \cmark & $50.4 \pm 0.8$ & $42.4 \pm 0.6$ & $26.5 \pm 0.6$ & $22.8 \pm 0.8$ & $65.3 \pm 0.9$ & $60.9 \pm 0.6$ & $47.6 \pm 0.3$ & $45.6 \pm 0.1$ & $75.7 \pm 0.6$ & $74.0 \pm 0.6$ & $65.7 \pm 0.7$ & $64.3 \pm 0.5$ \\
& & \grntxt{$+12.5$} & \grntxt{$+13.1$} & \grntxt{$+12.4$} & \grntxt{$+10.9$} & \grntxt{$+13.8$} & \grntxt{$+15.9$} & \grntxt{$+20.2$} & \grntxt{$+19.3$} & \grntxt{$+11.0$} & \grntxt{$+12.2$} & \grntxt{$+19.3$} & \grntxt{$+18.8$} \\
\cmidrule(r){1-2}
ViT-L & \xmark & $32.8 \pm 1.6$ & $24.8 \pm 1.1$ & $14.8 \pm 2.2$ & $9.7 \pm 1.2$ & $42.7 \pm 0.9$ & $33.8 \pm 0.7$ & $21.3 \pm 1.5$ & $16.3 \pm 1.0$ & $55.7 \pm 0.7$ & $49.7 \pm 0.7$ & $36.0 \pm 1.3$ & $32.5 \pm 0.9$ \\
ViT-L & \cmark & $45.7 \pm 0.6$ & $39.0 \pm 0.5$ & $25.6 \pm 0.6$ & $24.1 \pm 0.8$ & $59.1 \pm 0.3$ & $55.2 \pm 0.4$ & $41.9 \pm 1.0$ & $42.7 \pm 0.6$ & $71.4 \pm 0.3$ & $69.0 \pm 0.4$ & $60.7 \pm 1.0$ & $60.3 \pm 0.8$ \\
& & \grntxt{$+12.9$} & \grntxt{$+14.2$} & \grntxt{$+10.8$} & \grntxt{$+14.4$} & \grntxt{$+16.3$} & \grntxt{$+21.5$} & \grntxt{$+20.5$} & \grntxt{$+26.4$} & \grntxt{$+15.7$} & \grntxt{$+19.3$} & \grntxt{$+24.7$} & \grntxt{$+27.8$} \\
\cmidrule(r){1-2}
DeiT-S & \xmark & $46.3 \pm 0.7$ & $38.1 \pm 0.3$ & $13.1 \pm 0.5$ & $9.9 \pm 0.1$ & $62.8 \pm 0.4$ & $58.2 \pm 0.2$ & $37.1 \pm 0.7$ & $34.3 \pm 0.5$ & $73.3 \pm 0.2$ & $73.9 \pm 0.4$ & $58.8 \pm 0.4$ & $59.4 \pm 0.6$ \\
DeiT-S & \cmark & $44.7 \pm 1.4$ & $37.1 \pm 1.4$ & $15.6 \pm 1.3$ & $12.1 \pm 0.9$ & $62.1 \pm 1.2$ & $57.8 \pm 1.1$ & $41.6 \pm 1.1$ & $37.9 \pm 1.2$ & $73.2 \pm 0.7$ & $73.3 \pm 0.4$ & $62.3 \pm 0.7$ & $61.4 \pm 0.9$ \\
& & \rdtxt{$-1.6$} & \rdtxt{$-1.1$} & \grntxt{$+2.4$} & \grntxt{$+2.2$} & \rdtxt{$-0.7$} & \rdtxt{$-0.4$} & \grntxt{$+4.4$} & \grntxt{$+3.5$} & \gtxt{$-0.1$} & \rdtxt{$-0.6$} & \grntxt{$+3.5$} & \grntxt{$+2.0$} \\
\cmidrule(r){1-2}
DeiT-B & \xmark & $48.1 \pm 0.9$ & $40.4 \pm 2.0$ & $15.8 \pm 0.2$ & $12.9 \pm 0.6$ & $64.0 \pm 0.9$ & $59.5 \pm 1.3$ & $39.0 \pm 0.9$ & $37.2 \pm 0.8$ & $74.1 \pm 0.7$ & $74.8 \pm 0.7$ & $59.1 \pm 0.8$ & $60.0 \pm 0.6$ \\
DeiT-B & \cmark & $50.7 \pm 0.1$ & $44.0 \pm 0.4$ & $19.3 \pm 0.2$ & $16.3 \pm 0.2$ & $66.0 \pm 0.2$ & $62.0 \pm 0.3$ & $43.4 \pm 0.3$ & $40.9 \pm 0.4$ & $75.4 \pm 0.1$ & $76.4 \pm 0.3$ & $62.8 \pm 0.2$ & $63.9 \pm 0.2$ \\
& & \grntxt{$+2.6$} & \grntxt{$+3.6$} & \grntxt{$+3.5$} & \grntxt{$+3.5$} & \grntxt{$+2.0$} & \grntxt{$+2.5$} & \grntxt{$+4.4$} & \grntxt{$+3.8$} & \grntxt{$+1.3$} & \grntxt{$+1.6$} & \grntxt{$+3.8$} & \grntxt{$+3.9$} \\
\cmidrule(r){1-2}
DeiT-L & \xmark & $39.2 \pm 2.6$ & $32.6 \pm 1.5$ & $10.5 \pm 2.8$ & $9.1 \pm 2.3$ & $55.7 \pm 2.5$ & $51.0 \pm 2.7$ & $30.3 \pm 4.0$ & $29.5 \pm 3.9$ & $68.5 \pm 2.1$ & $68.1 \pm 1.7$ & $51.7 \pm 3.1$ & $52.1 \pm 2.7$ \\
DeiT-L & \cmark & $51.9 \pm 0.7$ & $46.6 \pm 0.5$ & $21.5 \pm 1.3$ & $19.0 \pm 1.2$ & $66.6 \pm 0.6$ & $64.1 \pm 0.7$ & $45.3 \pm 1.3$ & $43.6 \pm 1.1$ & $75.6 \pm 0.4$ & $77.3 \pm 0.4$ & $63.8 \pm 0.8$ & $65.4 \pm 0.6$ \\
& & \grntxt{$+12.8$} & \grntxt{$+14.0$} & \grntxt{$+11.0$} & \grntxt{$+9.9$} & \grntxt{$+11.0$} & \grntxt{$+13.1$} & \grntxt{$+15.0$} & \grntxt{$+14.1$} & \grntxt{$+7.1$} & \grntxt{$+9.2$} & \grntxt{$+12.1$} & \grntxt{$+13.4$} \\
\cmidrule(r){1-2}
Swin-Ti & \xmark & $41.2 \pm 1.8$ & $32.5 \pm 0.3$ & $17.4 \pm 2.6$ & $12.2 \pm 0.2$ & $60.0 \pm 1.6$ & $51.4 \pm 0.2$ & $39.6 \pm 2.6$ & $34.8 \pm 0.9$ & $71.7 \pm 0.8$ & $66.1 \pm 0.7$ & $58.2 \pm 1.1$ & $53.6 \pm 1.2$ \\
Swin-Ti & \cmark & $49.8 \pm 0.6$ & $42.8 \pm 0.7$ & $24.2 \pm 0.7$ & $21.4 \pm 0.9$ & $66.4 \pm 0.6$ & $60.5 \pm 0.2$ & $47.8 \pm 0.5$ & $44.6 \pm 0.5$ & $76.0 \pm 0.3$ & $72.7 \pm 0.2$ & $65.7 \pm 0.5$ & $62.1 \pm 0.3$ \\
& & \grntxt{$+8.5$} & \grntxt{$+10.3$} & \grntxt{$+6.8$} & \grntxt{$+9.2$} & \grntxt{$+6.4$} & \grntxt{$+9.2$} & \grntxt{$+8.2$} & \grntxt{$+9.8$} & \grntxt{$+4.3$} & \grntxt{$+6.5$} & \grntxt{$+7.5$} & \grntxt{$+8.5$} \\
\cmidrule(r){1-2}
Swin-S & \xmark & $41.3 \pm 0.6$ & $33.0 \pm 0.1$ & $18.4 \pm 0.7$ & $13.3 \pm 0.5$ & $59.2 \pm 0.1$ & $51.2 \pm 0.5$ & $39.1 \pm 0.2$ & $35.9 \pm 0.3$ & $71.5 \pm 0.2$ & $65.6 \pm 0.1$ & $56.8 \pm 0.5$ & $53.2 \pm 0.2$ \\
Swin-S & \cmark & $48.6 \pm 0.7$ & $39.9 \pm 1.6$ & $22.2 \pm 0.9$ & $16.8 \pm 1.1$ & $64.4 \pm 0.9$ & $57.9 \pm 1.5$ & $43.8 \pm 1.1$ & $42.3 \pm 1.0$ & $75.7 \pm 0.2$ & $71.8 \pm 0.8$ & $63.2 \pm 0.4$ & $60.6 \pm 0.6$ \\
& & \grntxt{$+7.3$} & \grntxt{$+7.0$} & \grntxt{$+3.8$} & \grntxt{$+3.6$} & \grntxt{$+5.1$} & \grntxt{$+6.7$} & \grntxt{$+4.7$} & \grntxt{$+6.4$} & \grntxt{$+4.2$} & \grntxt{$+6.2$} & \grntxt{$+6.4$} & \grntxt{$+7.4$} \\
\cmidrule(r){1-2}
ResNet50 & \xmark & $48.6 \pm 0.6$ & $35.1 \pm 0.4$ & $23.0 \pm 0.7$ & $13.0 \pm 0.3$ & $65.8 \pm 0.4$ & $58.2 \pm 0.3$ & $44.4 \pm 0.6$ & $38.1 \pm 0.5$ & $73.2 \pm 0.2$ & $69.9 \pm 0.2$ & $56.9 \pm 0.1$ & $56.9 \pm 0.1$ \\
ResNet50 & \cmark & $52.3 \pm 0.6$ & $39.5 \pm 0.1$ & $27.4 \pm 0.6$ & $17.6 \pm 0.1$ & $68.5 \pm 0.3$ & $61.9 \pm 0.1$ & $48.5 \pm 0.4$ & $43.7 \pm 0.3$ & $75.2 \pm 0.1$ & $72.4 \pm 0.1$ & $61.7 \pm 0.3$ & $61.7 \pm 0.3$ \\
& & \grntxt{$+3.7$} & \grntxt{$+4.4$} & \grntxt{$+4.4$} & \grntxt{$+4.6$} & \grntxt{$+2.8$} & \grntxt{$+3.8$} & \grntxt{$+4.2$} & \grntxt{$+5.5$} & \grntxt{$+2.0$} & \grntxt{$+2.5$} & \grntxt{$+4.8$} & \grntxt{$+4.8$} \\
\cmidrule(r){1-2}
ResNet101 & \xmark & $47.8 \pm 0.7$ & $37.2 \pm 0.5$ & $20.4 \pm 1.2$ & $14.2 \pm 0.3$ & $64.9 \pm 0.2$ & $58.6 \pm 0.5$ & $41.1 \pm 0.5$ & $38.3 \pm 0.7$ & $73.6 \pm 0.3$ & $70.5 \pm 0.3$ & $56.2 \pm 0.4$ & $57.0 \pm 0.5$ \\
ResNet101 & \cmark & $52.3 \pm 0.1$ & $42.2 \pm 0.1$ & $24.7 \pm 0.1$ & $19.2 \pm 0.4$ & $68.8 \pm 0.6$ & $62.9 \pm 0.3$ & $46.4 \pm 1.5$ & $44.3 \pm 0.9$ & $76.0 \pm 0.4$ & $73.7 \pm 0.3$ & $61.0 \pm 1.2$ & $62.6 \pm 0.5$ \\
& & \grntxt{$+4.4$} & \grntxt{$+5.0$} & \grntxt{$+4.3$} & \grntxt{$+5.0$} & \grntxt{$+3.9$} & \grntxt{$+4.3$} & \grntxt{$+5.3$} & \grntxt{$+6.0$} & \grntxt{$+2.4$} & \grntxt{$+3.2$} & \grntxt{$+4.7$} & \grntxt{$+5.7$} \\
\bottomrule
\end{tabular}
}
\end{table}
\Cref{tab:corner-cases} reports accuracy on the corner-cases dataset~\cite{Fatima2025} for models trained with and without \schemename.
The dataset is constructed by pasting objects cropped by their full bounding boxes (which are available for the ImageNet validation set) onto 224$\times$224 infilled backgrounds.
The dataset has three factors: foreground size (56, 84, 112 pixels), spatial position (center, CeX, vs.\ corner, CoX), and background type (original image background, XxO, vs.\ a random background, XxR), yielding $3 \times 2 \times 2$ controlled configurations per model.
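The $3 \times 2 \times 2$ grid can be enumerated directly; the flush-to-the-border corner placement in the sketch below is our assumption of how \cite{Fatima2025} positions objects:
\begin{verbatim}
from itertools import product

SIZES = (56, 84, 112)                 # object side length in pixels
POSITIONS = ("center", "corner")      # CeX vs. CoX
BACKGROUNDS = ("original", "random")  # XxO vs. XxR

def placement(size, position, canvas=224):
    # Top-left paste coordinates on the 224 x 224 infilled background.
    if position == "center":
        off = (canvas - size) // 2
        return (off, off)
    return (0, 0)                     # corner: flush with the image border

configs = [(s, p, b, placement(s, p))
           for s, p, b in product(SIZES, POSITIONS, BACKGROUNDS)]
assert len(configs) == 12             # 3 sizes x 2 positions x 2 backgrounds
\end{verbatim}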
Across all architectures, training with \schemename consistently improves robustness to these composition shifts.
For ViT-S/B/L, gains range from roughly $+8$ to over $+27$ percentage points, with the largest improvements occurring in the most challenging settings with foregrounds placed in corners on random backgrounds (e.g., CoR and CeR).
Swin and ResNet models also benefit across all configurations, with increases typically between $+3$ and $+10$ points.
DeiT-S shows small drops on some same-background center cases (CeO/CoO), but still improves notably on random-background conditions (XxR), while DeiT-B/L gain across nearly all settings.
Three trends are apparent.
First, all baselines perform substantially worse when moving from original to random backgrounds and from centered to corner placements, indicating strong background and center biases.
Second, \schemename reduces this sensitivity: the absolute gap between center and corner, and between original and random backgrounds, shrinks for almost all models and sizes.
Third, the relative improvements are especially pronounced for smaller objects and off-center placements, suggesting that \schemename makes models more foreground-focused and less reliant on canonical object scale and position.
\section{\schemename Segmentation Samples}
\begin{figure}[t!]
\centering
\begin{subfigure}{.49\textwidth}
\includegraphics[width=\textwidth]{img/masked_image_examples_train.pdf}
\end{subfigure}
\hfill
\begin{subfigure}{.49\textwidth}
\includegraphics[width=\textwidth]{img/masked_image_examples.pdf}
\end{subfigure}
\caption{ImageNet training samples (left) and validation samples (right) of our segmentation masks with annotated bounding boxes.}
\label{fig:mask-examples}
\end{figure}
We show examples of the automatically generated segmentation masks for a diverse subset of object categories (``ant,'' ``busby,'' ``bell cote,'' ``pickelhaube,'' ``snorkel,'' ``stove,'' ``tennis ball,'' and ``volleyball'').
Note that ``busby,'' ``bell cote,'' ``pickelhaube,'' and ``snorkel'' are the four classes with the \textbf{worst} mean box precision and box-to-box IoU on the validation set.
\Cref{fig:mask-examples} (right) illustrates masks from the evaluation split, while \Cref{fig:mask-examples} (left) shows examples from the training split.
Across both sets, the masks accurately isolate foreground objects with clean boundaries, despite large variations in object scale, shape, and appearance, supporting their use for background removal and resampling in our training pipeline.
We find that the main failure cases are:
(\textit{i}) When the ground-truth annotation corresponds to only a part of an object, the predicted mask often expands to cover the entire object rather than the annotated region.
See for example ``busby'' or ``bell cote''.
(\textit{ii}) In images containing multiple instances, some objects may be missed, resulting in incomplete foreground coverage.
This is especially visible for ``busby'' and ``pickelhaube''.
However, note that especially for ``pickelhaube,'' the training distribution differs noticeably from the validation distribution, with many images showing just the head instead of groups of people wearing the helmet.
(\textit{iii}) In rare cases, the predicted mask degenerates and covers nearly the entire image, effectively eliminating the background.
This happens in $<10\%$ of all training images, and we do not use the resulting backgrounds for recombination (see \Cref{apdx:infill-ratio}).
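As a sketch, both the infill ratio and the pruning rule used here can be computed directly from the binary foreground mask (function names are ours; the exclusive comparison and the $t_\text{prune}=0.8$ default follow the ablation in the main text):
\begin{verbatim}
import numpy as np

def infill_ratio(mask: np.ndarray) -> float:
    # Fraction of pixels that had to be infilled, i.e. the
    # relative area covered by the (boolean HxW) foreground mask.
    return float(mask.mean())

def keep_background(mask: np.ndarray, t_prune: float = 0.8) -> bool:
    # Degenerate masks covering (nearly) the whole image yield
    # mostly artificial backgrounds and are excluded.
    return infill_ratio(mask) < t_prune
\end{verbatim}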
\section{\schemename Sample Images}
\begin{table*}[h!]
\centering
\caption{Sample Images from using \schemename on ImageNet.}
\label{tbl:example-images}
\end{tabular}
}
\end{table*}
We visualize example infilled images for both LaMa \cite{Suvorov2021} and Attentive Eraser \cite{Sun2024} in \Cref{tab:infill-examples}.
The side-by-side examples show that both methods generally produce visually consistent infills, with many pairs appearing extremely similar at a glance.
We qualitatively find that Attentive Eraser yields slightly sharper textures or more coherent local structure, while LaMa sometimes produces smoother or more homogenized regions.
Across the table, fine-detail areas such as foliage, bark, and ground textures reveal the most noticeable differences between the two methods.
\FloatBarrier
\newpage
\section{Image Infill Ratio}
\label{apdx:infill-ratio}
\begin{table*}[h!]
\centering
\caption{Example infills where a large fraction of the image area is infilled (high infill ratio).}
% !TeX root = ../main.tex
\section{Discussion \& Conclusion}
\label{sec:conclusion}
% We introduce \schemename, a novel data augmentation scheme that facilitates improved Transformer training for image classification.
% By explicitly separating and recombining foreground objects and backgrounds, \schemename enables controlled data augmentation beyond existing image compositions, leading to significant performance gains on ImageNet and downstream fine-grained classification tasks.
% Furthermore, \schemename provides a powerful framework for analyzing model behavior and quantifying biases, including background robustness, foreground focus, center bias, and size bias.
% Our experiments demonstrate that training using \schemename not only boosts accuracy but also significantly reduces these biases, resulting in more robust and generalizable models.
% In the future, we see \schemename also being applied to other datasets and tasks, like video recognition or segmentation.
% \schemename's ability to both improve performance and provide insights into model behavior makes it a valuable tool for advancing CV research and developing more reliable AI systems.
We introduced \schemename, a controlled composition augmentation scheme that factorizes images into foreground objects and backgrounds and recombines them with explicit control over background identity, object position, and object scale.
% Empirically, \schemename consistently improves clean accuracy and robustness across architectures and scales.
Across diverse architectures, training with \schemename on top of standard strong augmentations yields substantial gains on ImageNet (up to $+6$ p.p.) and fine-grained downstream tasks (up to $+7.3$ p.p.), and consistently improves robustness on well-recognized benchmarks (up to $+19$ p.p.).
\schemename's compositional controls additionally provide a framework for analyzing model behavior and quantifying biases, including background robustness, foreground focus, center bias, and size bias.
This dual role of \schemename as both a training mechanism and an evaluation tool highlights the value of explicit compositional factorization in understanding and improving image classifiers.
In future work, we aim to extend controlled composition beyond classification to multi-object and dense prediction settings, including detection, segmentation, and video recognition.
% By coupling performance gains with interpretable, controllable evaluations, \schemename offers a practical data-centric tool for advancing robust and reliable computer vision systems.
More generally, we believe that designing augmentations around explicitly controllable and interpretable generative setups is a promising direction for building robust and reliable vision systems.
% !TeX root = ../main.tex
\section{Experiments}
\label{sec:experiments}
% \begin{itemize}
% \item [1.] Training on RecombiNet
% \item ImageNet results (large)
% \item Ablation (TinyImageNet): Foreground position
% \item Ablation (TinyImageNet): Which background (or part of other ablation table?)
% \item Ablation (TinyImageNet+ImageNet for edge blur): Design decisions: Which infill model, pruning threshold, p$\to$t / t$\to$p, foreground rotation range (?), edge blur, original image probability/schedule, foreground size
% \item With other Data Augmentations
% \item [2.] More evaluation metrics
% \item Background accuracy (how to frame/sell? Background bias?) / Background robustness (= foreground with all backgrounds)?
% \item Foreground focus
% \item Position bias
% \item Size bias
% \end{itemize}
We conduct a comprehensive suite of experiments to validate the effectiveness of our approach,
% We compare training on \name, the ImageNet instantiation of \schemename, to training on ImageNet for 10 different models.
comparing ImageNet training with and without \schemename for 10 different models and 5 data augmentation pipelines.
Furthermore, we assess the impact of using \schemename for pretraining on multiple fine-grained downstream datasets.
Finally, we exploit \schemename's control over the image distribution to quantify model behaviors and biases.
We always report the mean and standard deviation of three independent training runs.
\subsection{Design Choices of ForAug}
\label{sec:ablation}
We start by ablating the design choices of \schemename on TinyImageNet~\cite{Le2015}, a subset of ImageNet containing 200 categories with 500 images each. %, and Tiny\name, the application of \schemename to TinyImageNet.
% \Cref{tab:ablation} presents the results of these ablations.
\Cref{tab:ablation-segment} presents ablations for the segmentation phase and \Cref{tab:ablation-recombine} for the recombination phase.
\begin{table}
\caption{Ablation of the design decisions in the segmentation phase of \schemename on TinyImageNet.
The first line is our baseline, while the other lines use \schemename.
We use basic settings with the \emph{same} background strategy during recombination for this experiment.
}
\label{tab:ablation-segment}
\centering
\small
\resizebox{.9\columnwidth}{!}{
\begin{tabular}{cccc}
\toprule
\multirow{2.5}{*}{\makecell{Detect. \\ Prompt}} & \multirow{2.5}{*}{\makecell{Infill \\ Model}} & \multicolumn{2}{c}{TinyImageNet Accuracy [\%]} \\
\cmidrule{3-4}
& & ViT-Ti & ViT-S \\
\midrule
\multicolumn{2}{l}{\textbf{TinyImageNet}} & $66.1 \pm 0.5$ & $68.3 \pm 0.7$ \\
specific & LaMa \cite{Suvorov2021} & $65.5 \pm 0.4$ & $71.2 \pm 0.5$ \\
general & \gtxt{LaMa \cite{Suvorov2021}} & $66.4 \pm 0.6$ & $72.9 \pm 0.6$ \\
\gtxt{general} & Att. Eraser \cite{Sun2024} & $67.5 \pm 1.2$ & $72.4 \pm 0.5$ \\
\bottomrule
\end{tabular}}
\end{table}
\begin{table}[t]
\caption{Ablation of the recombination phase of \schemename on TinyImageNet (top) and ImageNet (bottom). The first experiments use the initial segmentation settings with LaMa \cite{Suvorov2021}.}
\label{tab:ablation-recombine}
\centering
\resizebox{\columnwidth}{!}{
\begin{tabular}{cccccccc}
\toprule
% FG. & Augment. & BG. & BG. & Original & Edge & \multicolumn{2}{c}{Accuracy [\%]} \\
% Size & Order & Strat. & Prune & Mixing & Smoothing & ViT-Ti & ViT-S \\
\multirow{2.5}{*}{\makecell{FG. \\ Size}} & \multirow{2.5}{*}{\makecell{Augment. \\ Order}} & \multirow{2.5}{*}{\makecell{BG. \\ Strat.}} & \multirow{2.5}{*}{\makecell{BG. \\ Prune}} & \multirow{2.5}{*}{\makecell{Original \\ Mixing}} & \multirow{2.5}{*}{\makecell{Edge \\ Smooth.}} & \multicolumn{2}{c}{Accuracy [\%]} \\
\cmidrule{7-8}
& & & & & & ViT-Ti & ViT-S \\
\midrule
\multicolumn{6}{l}{\textbf{TinyImageNet}} & \gtxt{$66.1\pm0.5$} & \gtxt{$68.3\pm0.7$} \\
mean & crop$\to$paste & same & - & - & \gtxt{-} & $64.6\pm0.5$ & $70.0\pm0.6$ \\
range & \gtxt{crop$\to$paste} & \gtxt{same} & \gtxt{-} & \gtxt{-} & \gtxt{-} & $65.5\pm0.4$ & $71.2\pm0.5$ \\
\midrule
range & crop$\to$paste & same & - & - & - & $67.5\pm1.2$ & $72.4\pm0.5$ \\
\gtxt{range} & paste$\to$crop & \gtxt{same} & \gtxt{-} & \gtxt{-} & \gtxt{-} & $67.1\pm1.2$ & $72.9\pm0.5$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & 1.0 & \gtxt{-} & \gtxt{-} & $67.0\pm1.2$ & $73.0\pm0.3$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & 0.8 & \gtxt{-} & \gtxt{-} & $67.2\pm1.2$ & $72.9\pm0.8$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & 0.6 & \gtxt{-} & \gtxt{-} & $67.5\pm1.0$ & $72.8\pm0.7$ \\
% \gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & $\sigma_\text{max} = 2.0$ & \gtxt{-} & $67.2\pm0.4$ & $72.9\pm0.5$ \\
% \gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & $\sigma_\text{max} = 4.0$ & \gtxt{-} & $65.9\pm0.5$ & $72.4\pm0.6$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & $p=0.2$ & \gtxt{-} & $69.8\pm0.5$ & $75.0\pm0.3$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & $p=0.33$ & \gtxt{-} & $69.5\pm0.4$ & $75.2\pm1.0$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & $p=0.5$ & \gtxt{-} & $70.3\pm1.0$ & $74.2\pm0.2$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & linear & \gtxt{-} & $70.1\pm0.7$ & $74.9\pm0.8$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & reverse lin. & \gtxt{-} & $67.6\pm0.2$ & $73.2\pm0.3$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & cos & \gtxt{-} & $71.3\pm1.0$ & $75.7\pm0.8$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & \gtxt{cos} & $\sigma_\text{max} = 4.0$ & $70.0\pm0.8$ & $75.5\pm0.7$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & orig. & \gtxt{0.8} & \gtxt{cos} & \gtxt{$\sigma_\text{max} = 4.0$} & $67.2\pm0.9$ & $69.9\pm1.0$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & all & \gtxt{0.8} & \gtxt{cos} & \gtxt{$\sigma_\text{max} = 4.0$} & $70.1\pm0.7$ & $77.5\pm0.6$ \\
\midrule
\multicolumn{6}{l}{\textbf{ImageNet}} & \gtxt{-} & \gtxt{$79.1\pm0.1$} \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & \gtxt{cos} & \gtxt{-} & - & $80.5\pm0.1$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & \gtxt{same} & \gtxt{0.8} & \gtxt{cos} & $\sigma_\text{max} = 4.0$ & - & $80.7\pm0.1$ \\
\gtxt{range} & \gtxt{paste$\to$crop} & all & \gtxt{0.8} & \gtxt{cos} & \gtxt{$\sigma_\text{max} = 4.0$} & - & $81.4\pm0.1$ \\
\bottomrule
\end{tabular}}
\end{table}
\begin{table*}[t]
\caption{ImageNet results when training ViTs with different data augmentation pipelines.
\schemename consistently improves performance in low- and mid-augmentation regimes and remains complementary to strong augmentation pipelines, with larger gains for larger models.
}
\label{tab:imagenet-pipelines}
\centering
\resizebox{\textwidth}{!}{
\begin{tabular}{lccccc}
\toprule
\multirow{2.5}{*}{Augmentation} & \multirow{2.5}{*}{MixUp} & \multirow{2.5}{*}{CutMix} & \multicolumn{3}{c}{Accuracy [\%] using} \\
\cmidrule(l){4-6}
& & & ViT-S & ViT-B & ViT-L \\
\midrule
Basic & \xmark & \xmark & $71.9 \pm 0.1$ & $69.5 \pm 0.2$ & $68.3 \pm 0.4$ \\
Basic + \schemename & \xmark & \xmark & $75.7 \pm 0.2$ & $75.5 \pm 0.6$ & $73.1 \pm 1.7$ \\
& & & \grntxt{$+3.8$} & \grntxt{$+6.0$} & \grntxt{$+4.8$} \\
\midrule
RandAugment & \xmark & \xmark & $76.3 \pm 0.5$ & $75.5 \pm 0.2$ & $74.7 \pm 0.4$ \\
RandAugment + \schemename & \xmark & \xmark & $78.0 \pm 0.1$ & $77.8 \pm 0.1$ & $78.0 \pm 0.6$ \\
& & & \grntxt{$+1.7$} & \grntxt{$+2.3$} & \grntxt{$+3.3$} \\
\midrule
Basic & \cmark & \cmark & $79.8 \pm 0.3$ & $78.6 \pm 0.4$ & $78.1 \pm 1.6$ \\
Basic + \schemename & \cmark & \cmark & $79.8 \pm 0.3$ & $81.6 \pm 0.5$ & $81.0 \pm 0.4$ \\
& & & \gtxt{$\pm 0.0$} & \grntxt{$+3.0$} & \grntxt{$+2.9$} \\
\midrule
3-Augment & \xmark & \cmark & $79.1 \pm 0.1$ & $77.6 \pm 0.2$ & $75.3 \pm 0.4$ \\
3-Augment + \schemename & \xmark & \cmark & $81.4 \pm 0.1$ & $81.1 \pm 0.4$ & $79.8 \pm 0.1$ \\
& & & \grntxt{$+2.3$} & \grntxt{$+3.5$} & \grntxt{$+4.5$} \\
\midrule
RandAugment & \cmark & \cmark & $80.1 \pm 0.1$ & $81.9 \pm 0.3$ & $79.3 \pm 2.3$ \\
RandAugment + \schemename & \cmark & \cmark & $80.0 \pm 0.3$ & $81.9 \pm 0.2$ & $82.4 \pm 0.1$ \\
& & & \gtxt{$-0.1$} & \gtxt{$\pm 0.0$} & \grntxt{$+3.1$} \\
\bottomrule
\end{tabular}
}
\end{table*}
\textbf{Prompt.}
% We present the ablation of our main design decisions in \Cref{tab:ablation}.
First, we evaluate the type of prompt used to detect the foreground object.
Here, the \emph{general} prompt, which contains the class name and the more general object category, outperforms using only the class name (\emph{specific}).
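A minimal sketch of the two prompt variants (the exact template strings are our assumption, not taken from the paper):
\begin{verbatim}
def detection_prompt(class_name: str, category: str,
                     kind: str = "general") -> str:
    # "specific": class name only; "general": class name plus
    # the broader object category it belongs to.
    if kind == "specific":
        return class_name
    return f"{class_name}. {category}."
\end{verbatim}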
\textbf{Inpainting.} Among inpainting models, Attentive Eraser~\cite{Sun2024} produces slightly better results compared to LaMa~\cite{Suvorov2021} ($+0.5$ p.p. on average).
For inpainting examples, see the supplementary material.
% (see the supplementary material for examples).
% When comparing the infill models, the GAN-based LaMa \cite{Suvorov2021} gets outperformed by the Attentive Eraser \cite{Sun2024}.
\textbf{Foreground size}
% We observe that LaMa's often infills unnatural textures compared to Attentive Eraser.
% The size of foreground objects during training has a significant impact on the performance.
% Here, using the greater variability of the \emph{range} strategy increases the performance by $\approx 1\%$ compared to the \emph{mean} strategy.
significantly impacts performance.
Employing a \emph{range} of sizes during recombination, rather than a fixed \emph{mean} size, boosts accuracy by approximately 1 p.p.
This suggests that the added variability is beneficial.
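Schematically, the two strategies differ only in whether a new relative size is drawn per recombination (the bounds are illustrative, not the paper's values):
\begin{verbatim}
import random

def sample_fg_scale(strategy: str = "range",
                    lo: float = 0.25, hi: float = 1.0) -> float:
    # "mean" always pastes at the midpoint size; "range"
    # samples a fresh relative size for every recombination.
    if strategy == "mean":
        return 0.5 * (lo + hi)
    return random.uniform(lo, hi)
\end{verbatim}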
\textbf{Order of data augmentation.}
% (1) Applying the image crop related augmentations \emph{before} pasting the foreground object and the color-based ones \emph{after} pasting or (2) applying all data augmentations after pasting the foreground object.
% While results are ambiguous, we choose the second strategy, as it improves the performance of ViT-S, although not the one of ViT-Ti.
Applying all augmentations after foreground-background recombination (\emph{paste$\to$crop$\to$color}) improves ViT-S's performance compared to applying crop-related augmentations before pasting (\emph{crop$\to$paste$\to$color}).
ViT-Ti results are ambiguous.
\textbf{Background pruning.}
When it comes to the backgrounds to use, we test different pruning thresholds ($t_\text{prune}$) to exclude backgrounds with large inpainted regions.
% and only use backgrounds with an relative size of the infilled region of at most $t_\text{prune}$ (exclusive).
A threshold of $t_\text{prune}=1.0$ means that we use all backgrounds that are not fully infilled.
% We find that the background pruning does not significantly impact the models' performance.
% We choose $t_\text{prune}=0.8$ for the following experiments to exclude backgrounds that are mostly artificial.
Varying $t_\text{prune}$ has minimal impact.
We choose $t_\text{prune} = 0.8$ to exclude predominantly artificial backgrounds.
% One of the most important design decisions is the mixing of the original dataset with \name.
\textbf{Mixing} \schemename-augmented samples with the original ImageNet data proves crucial.
While constant and linear mixing schedules improve performance by $2$--$3$ p.p.\ over training on augmented samples only, the cosine annealing schedule proves optimal, boosting accuracy by $3$--$4$ p.p.
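A sketch of the per-sample mixing decision (function and schedule names are ours; in particular, the direction of the ramp, increasing the probability of original images over training, is an assumption):
\begin{verbatim}
import math, random

def use_original(epoch: int, total: int, schedule="cos") -> bool:
    # Per-sample choice between an original and a recombined
    # image; "cos" anneals the original-image probability
    # from 0 to 1 over training (direction is an assumption).
    t = epoch / max(1, total - 1)
    if schedule == "cos":
        p = 0.5 * (1.0 - math.cos(math.pi * t))
    elif schedule == "linear":
        p = t
    else:                      # constant, e.g. schedule=0.33
        p = float(schedule)
    return random.random() < p
\end{verbatim}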
\textbf{Edge smoothing.}
We evaluate the impact of using Gaussian blurring to smooth the edges of the foreground masks.
% Similarly, applying edge smoothing to foreground masks with Gaussian blurring actually hurts performance on Tiny\name, but slightly improves it on \name.
For larger models, this gives us a slight performance boost on the full ImageNet (second to last line in \Cref{tab:ablation-recombine}).
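A minimal sketch of the smoothing step (assuming a uniformly drawn blur strength up to $\sigma_\text{max}$; the paper may fix or draw $\sigma$ differently):
\begin{verbatim}
import numpy as np
from scipy.ndimage import gaussian_filter

def smooth_mask(mask: np.ndarray, sigma_max: float = 4.0):
    # Blur the binary paste mask so foreground edges blend in.
    sigma = np.random.uniform(0.0, sigma_max)
    return gaussian_filter(mask.astype(np.float32), sigma=sigma)
\end{verbatim}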
\textbf{Background strategy.}
Another design decision is which background images may be paired with each foreground object.
% We evaluate three different strategies.
% (1) Picking the background from which that specific foreground was originally extracted.
% The major difference to ImageNet when using this setup is the variability in size and position of the foreground object.
% (2) Picking a background that originally had a foreground object of the same class in it.
% Here, we have backgrounds where objects of this type can typically appear while also creating a wider variety of samples due to pairing each foreground object with different backgrounds each time.
% (3) Picking any background.
% This choice has the largest variety of backgrounds, but the backgrounds are not semantically related to the foreground object anymore.
% We find in \Cref{fig:bg-strategy} that choosing only a foreground's original background is the worst choice.
We compare using the original background, a background from the same class, and any background.
These strategies go from low diversity and high shared information content between the foreground and background to high diversity and low shared information content.
For \emph{ViT-Ti}, the latter two strategies perform comparably, while \emph{ViT-S} benefits from the added diversity of using any background.
The same is true when training on the full ImageNet.
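Schematically, the strategies only differ in the pool the background is drawn from (a sketch; \texttt{orig} would simply return the foreground's own source background):
\begin{verbatim}
import random

def pick_background(fg_class: int, bgs_by_class: dict,
                    strategy: str = "all"):
    # bgs_by_class: class id -> list of infilled backgrounds.
    # "same" keeps foreground and background semantically
    # related; "all" maximizes diversity.
    if strategy == "same":
        return random.choice(bgs_by_class[fg_class])
    pool = [b for bgs in bgs_by_class.values() for b in bgs]
    return random.choice(pool)
\end{verbatim}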
\begin{table}
\caption{Accuracy of ViT-S on TinyImageNet (TIN) in percent using \schemename with different foreground position distributions by varying the Bates parameter $\eta$.
The best performance is achieved when using the uniform distribution ($\eta=1$) for training.}
\label{tbl:foreground-eta}
\centering
\small
\resizebox{.9\columnwidth}{!}{
\begin{tabular}{ccccccc}
\toprule
\multirow{2.5}{*}{\makecell{Bates Parameter \\during training}} & \multirow{2.5}{*}{\makecell{TIN \\w/o \schemename}} & \multicolumn{5}{c}{TIN w/ \schemename} \\
\cmidrule(l){3-7}
& & $\eta=-3$ & $-2$ & $1/-1$ & $2$ & $3$ \\
\midrule
Baseline & 68.9 & 60.5 & 60.2 & 60.8 & 62.6 & 63.1 \\
$\eta=-3$ & 71.3 & 79.3 & 79.5 & 79.1 & 79.3 & 79.1 \\
$\eta=-2$ & 71.5 & 80.0 & 78.7 & 79.3 & 79.1 & 78.8 \\
$\eta=1/-1$ & 72.3 & 79.5 & 78.9 & 80.2 & 79.7 & 80.4 \\
$\eta=2$ & 71.3 & 78.2 & 77.8 & 79.1 & 79.6 & 79.9 \\
$\eta=3$ & 71.4 & 77.2 & 76.9 & 78.6 & 79.6 & 79.7 \\
\bottomrule
\end{tabular}}
\end{table}
\textbf{Foreground position.}
Finally, we analyze the foreground object's positioning in the image, using a
generalization of the Bates distribution~\cite{Bates1955} with parameter $\eta \in \Z$.
The Bates distribution presents an easy way to sample from a bounded domain with just one hyperparameter that controls its concentration.
$\eta = 1/-1$ corresponds to the uniform distribution; $\eta > 1$ concentrates the distribution around the center; and for $\eta < -1$, the distribution is concentrated at the borders (see supplementary material for details).
% We utilize an extended Bates distribution to sample the position of the foreground object.
% The Bates distribution with parameter $\eta \geq 1$ is the mean of $\eta$ independent uniformly distributed random variables \cite{Jonhson1995}.
% The larger $\eta$, the more concentrated the distribution is at the center, $\eta < -1$ concentrates the distribution at the edges.
% We extend this concept to $\eta \leq -1$, shifting the distribution away from the center and towards the edges.
Sampling positions concentrated towards the image center reduces the difficulty of the task, which in turn reduces performance on TinyImageNet (\Cref{tbl:foreground-eta}): training with $\eta=2$ or $\eta=3$ underperforms the uniform $\eta=1/-1$.
We observe a similar reduction for $\eta < -1$.
% This experiment is conducted using the LaMa infill model.
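A minimal sketch of sampling a single position coordinate (the reflection used for $\eta < -1$ is one possible extension of the Bates distribution; the paper's exact construction may differ):
\begin{verbatim}
import random

def bates_position(eta: int) -> float:
    # eta = 1/-1: uniform; eta > 1: mass near the center
    # (mean of eta uniforms); eta < -1: mass at the borders.
    n = abs(eta)
    x = sum(random.random() for _ in range(n)) / n
    if eta < -1:
        # One way to push the center-peaked Bates density
        # towards the borders (an assumption):
        x = (x + 0.5) % 1.0
    return x
\end{verbatim}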
\begin{table}
\caption{Dataset statistics for TinyImageNet and ImageNet with and without \schemename. For \schemename we report the number of foreground/background pairs.}
\label{tab:dataset-stats}
\centering
\resizebox{.9\columnwidth}{!}{
\begin{tabular}{l S[table-format=4.0] S[table-format=7.0] S[table-format=5.0]}
\toprule
Dataset & {Classes} & {\makecell{Training \\ Images}} & {\makecell{Validation \\ Images}} \\
\midrule
TinyImageNet & 200 & 100000 & 10000 \\
TinyImageNet + \schemename & 200 & 99404 & 9915 \\
ImageNet & 1000 & 1281167 & 50000 \\
ImageNet + \schemename & 1000 & 1274557 & 49751 \\
\bottomrule
\end{tabular}}
\end{table}
After fixing the optimal design parameters in \Cref{tab:ablation-segment,tab:ablation-recombine} (last rows), we run \schemename's segmentation step on the entire ImageNet dataset.
\Cref{tab:dataset-stats} shows the resulting dataset statistics.
% The slightly lower number of images in \name is due to \emph{Grounded SAM} returning no or invalid detections for some images.
The slightly reduced image count for \schemename is due to instances where Grounded SAM fails to produce valid segmentation masks.
\subsection{Image Classification Results}
\begin{table}
\caption{ImageNet results of models trained on ImageNet with and without \schemename. \schemename improves the performance of most models, with a larger gain for larger models.}
\label{tab:imagenet-results}
\centering
\small
\resizebox{.8\columnwidth}{!}{\begin{tabular}{lccc}
\toprule
\multirow{2.5}{*}{Model} & \multicolumn{2}{c}{\makecell{ImageNet Accuracy [\%]}} & \multirow{2.5}{*}{Delta} \\
\cmidrule(lr){2-3}
& w/o \schemename & w/ \schemename & \\
\midrule
ViT-S & $79.1\pm0.1$ & $81.4\pm0.1$ & \grntxt{$+2.3$} \\
ViT-B & $77.6\pm0.2$ & $81.1\pm0.4$ & \grntxt{$+3.5$} \\
ViT-L & $75.3\pm0.4$ & $79.8\pm0.1$ & \grntxt{$+4.5$} \\
\midrule
Swin-Ti & $77.9\pm0.2$ & $79.7\pm0.1$ & \grntxt{$+1.8$} \\
Swin-S & $79.4\pm0.1$ & $80.6\pm0.1$ & \grntxt{$+1.2$} \\
\midrule
DeiT-S & $80.1\pm0.1$ & $80.0\pm0.3$ & \gtxt{$-0.1$} \\
DeiT-B & $81.9\pm0.3$ & $81.9\pm0.2$ & \gtxt{$\pm0.0$} \\
DeiT-L & $79.3\pm2.3$ & $82.4\pm0.1$ & \grntxt{$+3.1$} \\
\midrule
ResNet-50 & $78.3\pm0.1$ & $78.8\pm0.1$ & \grntxt{$+0.5$} \\
ResNet-101 & $79.4\pm0.1$ & $80.4\pm0.1$ & \grntxt{$+1.0$} \\
\bottomrule
\end{tabular}}
\end{table}
\Cref{tab:imagenet-results} compares the ImageNet performance of models trained with and without \schemename.
We adopt the training setup of \cite{Nauen2025} and \cite{Touvron2022} for training ViT \cite{Dosovitskiy2021}, Swin \cite{Liu2021} and ResNet \cite{He2016} (representing CNNs) models, as well as the setup of DeiT \cite{Touvron2021b} for that model.
Both setups use strong data augmentations like RandAugment, CutMix, and MixUp, optimized for Transformers (details in supplementary material).
\textbf{ImageNet training.}
\Cref{tab:imagenet-pipelines} analyzes the effect of \schemename under different data augmentation pipelines:
A \emph{basic} pipeline with RandomResizedCrop, Flip and ColorJitter, the \emph{3-Augment} pipeline from \cite{Touvron2022,Nauen2025} that also includes Grayscale, Solarization and GaussianBlur, as well as the widely used \emph{RandAugment}~\cite{Cubuk2020}-based pipeline from DeiT~\cite{Touvron2021b}.
Additionally, we include MixUp~\cite{Zhang2018a} and CutMix~\cite{Yun2019} augmentations.
% We also include Mixup and CutMix.
We find that the effectiveness of \schemename depends on the interplay between model capacity and baseline augmentation strength.
When the baseline augmentation is weak or moderate, \schemename consistently improves ImageNet accuracy, with gains increasing for larger ViT models (up to $+6.0$ p.p.\ for ViT-B).
As the augmentation pipeline becomes stronger (e.g., RandAugment with MixUp and CutMix), ImageNet improvements diminish for smaller models, indicating that the baseline augmentation already saturates their capacity.
Importantly, even in cases where ImageNet accuracy does not improve, we consistently observe gains during downstream fine-tuning (see \Cref{tab:downstream-results}), suggesting that \schemename enhances representation quality beyond what is reflected by ImageNet accuracy.
\Cref{tab:imagenet-results} additionally compares performance of different model architectures.
ViT~\cite{Dosovitskiy2021}, Swin~\cite{Liu2021} and ResNet~\cite{He2016} (representing CNNs) are trained using the ``3-augment'' strategy, while DeiT~\cite{Touvron2021b} is trained using the ``RandAugment'' strategy.
Notably, \schemename improves performance across all tested architectures, including the ResNet models, % (up to $1$ p.p.),
demonstrating benefits beyond Transformers.
For DeiT we only observe benefits on ImageNet for the larger models.
For other transformers, we observe improvements from $1.2$ p.p. to $4.5$ p.p., with increasing gains for larger models.
\schemename's improvements counteract the drop in performance for increasing model sizes.
Without \schemename this drop is $3.8$ p.p. (ViT-S to L), while with \schemename it is reduced to $1.6$ p.p.
For DeiT there is a drop of $0.8$ p.p. from small to large, while with \schemename there is a \emph{gain} of $2.4$ p.p.
\begin{table}
\caption{Comparison of \schemename and simple Copy-Paste methods. We train ViT-S on ImageNet using the same 3-augment data augmentation on top of the copy-paste augmentation.}
\label{tab:copy-paste-comparison}
\centering
\resizebox{\columnwidth}{!}{
\begin{tabular}{lcc S[table-format=+2.1,retain-explicit-plus,detect-inline-weight=math,detect-weight=true]}
\toprule
Augmentation & labels & \makecell{ Accuracy [\%]} & {\makecell{Delta \\to Prev.}} \\
\midrule
% Baseline & & $79.1 \pm 0.1$ \\
Baseline + \textbf{Simple Copy-Paste} & bg & $31.3 \pm 0.6$ & \\
+ mixed labels & fg + bg & $32.0 \pm 0.8$ & +0.7 \\
+ fg labels & fg & $31.6 \pm 0.9$ & -0.4 \\
+ \emph{range} foreground size variation & \gtxt{fg} & $43.0 \pm 1.2$ & \bfseries +11.4 \\
+ infilled backgrounds & \gtxt{fg} & $68.7 \pm 0.2$ & \bfseries +25.7 \\
+ \emph{cos} mixing strategy & \gtxt{fg} & $81.2 \pm 0.1$ & \bfseries +12.5 \\
+ edge smoothing & \gtxt{fg} & $81.3 \pm 0.1$ & +0.1 \\
+ background pruning$=$ \textbf{\schemename} & \gtxt{fg} & $81.4 \pm 0.1$ & +0.1 \\
\bottomrule
\end{tabular}}
\end{table}
\textbf{Comparison to Simple Copy-Paste.}
We compare \schemename to a simple adaptation of the Copy-Paste augmentation inspired by \cite{Ge2023,Ghiasi2020,Shermaine2025} in \Cref{tab:copy-paste-comparison}.
In contrast to semantic segmentation, we do not have ground-truth foreground masks available.
Thus, we paste the extracted foreground objects from \emph{\schemename's segmentation stage} onto normal ImageNet images.
% Since such images do not have straight forward classification labels, we test multiple possibilities.
We observe three large jumps in accuracy: (\textbf{1}) from our \emph{range} foreground size variation ($+11.4$ p.p.), (\textbf{2}) from using our infilled backgrounds instead of images from the dataset ($+25.7$ p.p.), and (\textbf{3}) from our \emph{cos} mixing strategy with non-augmented images ($+12.5$ p.p.).
\schemename's changes to the naive copy-paste augmentation are thus imperative for good classification performance.
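For clarity, the label variants from \Cref{tab:copy-paste-comparison} can be summarized as soft targets (a sketch; the even 50/50 weighting for mixed labels is our assumption):
\begin{verbatim}
def copy_paste_target(fg_label: int, bg_label: int,
                      labels: str = "fg"):
    # Supervision choices from the comparison: foreground
    # class, background class, or a mix of both.
    if labels == "fg":
        return {fg_label: 1.0}
    if labels == "bg":
        return {bg_label: 1.0}
    return {fg_label: 0.5, bg_label: 0.5}
\end{verbatim}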
\begin{table}[t]
\caption{Downstream accuracy in percent when finetuning on other datasets. Models are pretrained on ImageNet with and without \schemename. Pretraining using \schemename increases transformer downstream accuracy.
% on all datasets.
}
\label{tab:downstream-results}
\centering
\resizebox{\columnwidth}{!}{\begin{tabular}{lcccccc}
\toprule
Model & \schemename & Aircraft & Cars & Flowers & Food & Pets \\
\midrule
ViT-S & \xmark & $72.4\pm1.0$ & $89.8\pm0.3$ & $94.5\pm0.2$ & $89.1\pm0.1$ & $93.8\pm0.2$ \\
ViT-S & \cmark & $78.6\pm0.5$ & $92.2\pm0.2$ & $95.5\pm0.2$ & $89.6\pm0.1$ & $94.5\pm0.2$ \\
& & \grntxt{$+6.2$} & \grntxt{$+2.4$} & \grntxt{$+1.0$} & \grntxt{$+0.5$} & \grntxt{$+0.7$} \\
\cmidrule(r){1-1}
ViT-B & \xmark & $71.7\pm0.5$ & $90.0\pm0.2$ & $94.8\pm0.4$ & $89.8\pm0.2$ & $94.1\pm0.4$ \\
ViT-B & \cmark & $79.0\pm2.2$ & $93.3\pm0.1$ & $96.5\pm0.1$ & $90.9\pm0.1$ & $95.1\pm0.4$ \\
& & \grntxt{$+7.3$} & \grntxt{$+3.3$} & \grntxt{$+1.7$} & \grntxt{$+1.1$} & \grntxt{$+1.0$} \\
\cmidrule(r){1-1}
ViT-L & \xmark & $72.1\pm1.0$ & $88.8\pm0.3$ & $94.4\pm0.3$ & $90.1\pm0.2$ & $94.2\pm0.4$ \\
ViT-L & \cmark & $77.6\pm1.2$ & $89.1\pm0.2$ & $96.6\pm0.1$ & $91.3\pm0.1$ & $95.1\pm0.1$ \\
& & \grntxt{$+5.5$} & \grntxt{$+0.3$} & \grntxt{$+2.2$} & \grntxt{$+1.2$} & \grntxt{$+0.9$} \\
\midrule
DeiT-S & \xmark & $75.3\pm0.4$ & $91.1\pm0.2$ & $94.8\pm0.4$ & $89.2\pm0.2$ & $92.4\pm0.2$ \\
DeiT-S & \cmark & $76.8\pm0.8$ & $91.9\pm0.2$ & $95.2\pm0.3$ & $89.1\pm0.2$ & $92.3\pm0.4$ \\
& & \grntxt{$+1.5$} & \grntxt{$+0.8$} & \grntxt{$+0.4$} & \gtxt{$-0.1$} & \gtxt{$-0.1$} \\
\cmidrule(r){1-1}
DeiT-B & \xmark & $77.0\pm1.2$ & $92.9\pm0.2$ & $96.1\pm0.2$ & $91.2\pm0.1$ & $93.3\pm0.4$ \\
DeiT-B & \cmark & $79.3\pm0.3$ & $93.1\pm0.1$ & $96.4\pm0.2$ & $91.3\pm0.1$ & $93.3\pm0.1$ \\
& & \grntxt{$+2.3$} & \gtxt{$+0.2$} & \grntxt{$+0.3$} & \gtxt{$+0.1$} & \gtxt{$\pm0.0$} \\
\cmidrule(r){1-1}
DeiT-L & \xmark & $72.8\pm5.5$ & $92.8\pm1.0$ & $95.8\pm1.5$ & $90.5\pm2.6$ & $92.4\pm2.0$ \\
DeiT-L & \cmark & $78.8\pm0.8$ & $93.8\pm0.2$ & $97.0\pm0.2$ & $92.0\pm0.2$ & $93.5\pm0.2$ \\
& & \grntxt{$+6.0$} & \grntxt{$+1.0$} & \grntxt{$+1.2$} & \grntxt{$+1.5$} & \grntxt{$+1.1$} \\
\midrule
Swin-Ti & \xmark & $77.0\pm0.1$ & $91.3\pm0.6$ & $95.9\pm0.1$ & $90.0\pm0.2$ & $94.2\pm0.1$ \\
Swin-Ti & \cmark & $81.1\pm0.8$ & $92.8\pm0.4$ & $96.2\pm0.1$ & $90.4\pm0.3$ & $94.8\pm0.5$ \\
& & \grntxt{$+4.1$} & \grntxt{$+2.5$} & \grntxt{$+0.3$} & \grntxt{$+0.4$} & \grntxt{$+0.6$} \\
\cmidrule(r){1-1}
Swin-S & \xmark & $75.7\pm1.4$ & $91.0\pm0.3$ & $95.9\pm0.5$ & $91.1\pm0.2$ & $94.4\pm0.1$ \\
Swin-S & \cmark & $81.4\pm0.2$ & $93.1\pm0.2$ & $96.3\pm0.3$ & $91.2\pm0.2$ & $94.9\pm0.3$ \\
& & \grntxt{$+5.7$} & \grntxt{$+2.1$} & \grntxt{$+1.4$} & \gtxt{$+0.1$} & \grntxt{$+0.5$} \\
\midrule
ResNet-50 & \xmark & $78.2\pm0.5$ & $89.8\pm0.2$ & $91.7\pm0.4$ & $84.4\pm0.2$ & $93.7\pm0.3$ \\
ResNet-50 & \cmark & $80.3\pm0.4$ & $90.4\pm0.2$ & $91.7\pm0.2$ & $84.5\pm0.2$ & $93.7\pm0.3$ \\
& & \grntxt{$+2.1$} & \grntxt{$+0.6$} & \gtxt{$\pm0.0$} & \gtxt{$+0.1$} & \gtxt{$\pm0.0$} \\
\cmidrule(r){1-1}
ResNet-101 & \xmark & $78.4\pm0.6$ & $90.3\pm0.1$ & $91.2\pm0.5$ & $86.0\pm0.2$ & $94.3\pm0.2$ \\
ResNet-101 & \cmark & $81.4\pm0.5$ & $91.3\pm0.1$ & $92.9\pm0.2$ & $86.3\pm0.1$ & $94.0\pm0.3$ \\
& & \grntxt{$+3.0$} & \grntxt{$+1.3$} & \grntxt{$+1.7$} & \grntxt{$+0.3$} & \rdtxt{$-0.3$} \\
\bottomrule
\end{tabular}}
\end{table}
\textbf{Downstream tasks.} To assess the transferability of \schemename-trained models, we finetune models pretrained on ImageNet with and without \schemename on five fine-grained datasets: Aircraft, Cars, Flowers, Food, and Pets.
In \Cref{tab:downstream-results} we see transformer accuracies improve on all these datasets.
% and a reduction of error rate of up to $39.3\%$.
% Notably, training with \name increases the downstream performance of DeiT-S and DeiT-B, even though the ImageNet results were the same.
% This demonstrates that the improved representations from training on \name translate to superior performance beyond gains from better ImageNet performance.
Notably, training with \schemename boosts the downstream performance of DeiT-S and DeiT-B, despite similar ImageNet results.
This shows that the improved representations from training with \schemename translate to gains beyond better ImageNet scores.
% not only on ImageNet, but also on fine-grained image classification tasks.
\begin{table}[t]
\caption{Evaluation of models trained on ImageNet with and without \schemename. \schemename generally increases models' robustness to different image distribution shifts. Note that ViT-S \emph{with} \schemename outperforms DeiT-S, the only model where \schemename does not increase robustness.}
\label{tab:robustness-datasets}
\begin{subfigure}{.485\textwidth}
\resizebox{\textwidth}{!}{
\begin{tabular}{lccccccc}
\toprule
Model & w/ \schemename & IN-Hard & IN-A & IN-C & IN-R & IN-V2 \\
\midrule
ViT-S & \xmark & $18.1 \pm 0.6$ & $18.8 \pm 0.2$ & $44.7 \pm 0.8$ & $41.6 \pm 0.6$ & $67.3 \pm 0.4$ \\
ViT-S & \cmark & $21.0 \pm 0.4$ & $26.5 \pm 0.4$ & $52.6 \pm 0.6$ & $49.8 \pm 0.3$ & $70.6 \pm 0.1$ \\
& & \grntxt{$+2.9$} & \grntxt{$+7.7$} & \grntxt{$+7.9$} & \grntxt{$+8.1$} & \grntxt{$+3.3$} \\
\midrule
ViT-B & \xmark & $17.0 \pm 0.4$ & $15.8 \pm 0.7$ & $40.4 \pm 0.8$ & $38.4 \pm 0.7$ & $65.1 \pm 0.6$ \\
ViT-B & \cmark & $22.0 \pm 0.9$ & $31.9 \pm 1.5$ & $51.6 \pm 1.8$ & $48.7 \pm 1.7$ & $70.3 \pm 0.9$ \\
& & \grntxt{$+5.0$} & \grntxt{$+16.0$} & \grntxt{$+11.2$} & \grntxt{$+10.3$} & \grntxt{$+5.2$} \\
\midrule
ViT-L & \xmark & $15.6 \pm 0.4$ & $11.3 \pm 0.9$ & $38.4 \pm 1.0$ & $36.8 \pm 0.8$ & $61.6 \pm 0.8$ \\
ViT-L & \cmark & $20.6 \pm 0.1$ & $30.4 \pm 0.5$ & $48.2 \pm 0.7$ & $46.0 \pm 0.4$ & $68.7 \pm 0.3$ \\
& & \grntxt{$+5.0$} & \grntxt{$+19.0$} & \grntxt{$+9.8$} & \grntxt{$+9.3$} & \grntxt{$+7.1$} \\
\midrule
Swin-Ti & \xmark & $16.2 \pm 0.4$ & $15.0 \pm 0.3$ & $36.0 \pm 0.8$ & $36.6 \pm 0.2$ & $65.5 \pm 0.4$ \\
Swin-Ti & \cmark & $18.3 \pm 0.3$ & $20.3 \pm 0.4$ & $41.4 \pm 0.8$ & $41.4 \pm 0.2$ & $68.2 \pm 0.4$ \\
& & \grntxt{$+2.2$} & \grntxt{$+5.4$} & \grntxt{$+5.4$} & \grntxt{$+4.8$} & \grntxt{$+2.7$} \\
\midrule
Swin-S & \xmark & $18.2 \pm 0.3$ & $19.4 \pm 0.3$ & $39.0 \pm 0.7$ & $39.1 \pm 0.2$ & $67.5 \pm 0.1$ \\
Swin-S & \cmark & $20.5 \pm 0.1$ & $27.7 \pm 0.4$ & $45.6 \pm 0.8$ & $44.1 \pm 0.3$ & $69.6 \pm 0.1$ \\
& & \grntxt{$+2.2$} & \grntxt{$+8.4$} & \grntxt{$+6.6$} & \grntxt{$+5.0$} & \grntxt{$+2.2$} \\
\bottomrule
\end{tabular}
}
\end{subfigure}
\hfill
\begin{subfigure}{.505\textwidth}
\resizebox{\textwidth}{!}{
\begin{tabular}{lccccccc}
\toprule
Model & w/ \schemename & IN-Hard & IN-A & IN-C & IN-R & IN-V2 \\
\midrule
DeiT-S & \xmark & $19.5 \pm 0.2$ & $18.4 \pm 0.3$ & $58.8 \pm 0.7$ & $43.0 \pm 0.1$ & $68.8 \pm 0.2$ \\
DeiT-S & \cmark & $18.5 \pm 0.5$ & $17.3 \pm 1.0$ & $57.0 \pm 0.9$ & $43.8 \pm 0.2$ & $68.7 \pm 0.6$ \\
& & \rdtxt{$-1.0$} & \rdtxt{$-1.1$} & \rdtxt{$-1.8$} & \grntxt{$+0.8$} & \gtxt{$-0.1$} \\
\midrule
DeiT-B & \xmark & $22.6 \pm 0.2$ & $26.0 \pm 0.2$ & $62.1 \pm 1.0$ & $45.6 \pm 1.9$ & $70.6 \pm 0.9$ \\
DeiT-B & \cmark & $22.6 \pm 0.2$ & $25.0 \pm 0.3$ & $62.8 \pm 0.6$ & $47.7 \pm 0.8$ & $70.8 \pm 0.5$ \\
& & \gtxt{$\pm 0.0$} & \rdtxt{$-1.0$} & \grntxt{$+0.8$} & \grntxt{$+2.0$} & \gtxt{$+0.2$} \\
\midrule
DeiT-L & \xmark & $21.2 \pm 2.0$ & $20.2 \pm 3.4$ & $59.3 \pm 4.3$ & $41.3 \pm 2.7$ & $66.9 \pm 2.8$ \\
DeiT-L & \cmark & $23.4 \pm 0.3$ & $28.8 \pm 2.0$ & $63.4 \pm 0.7$ & $47.8 \pm 0.6$ & $71.6 \pm 0.5$ \\
& & \grntxt{$+2.2$} & \grntxt{$+8.7$} & \grntxt{$+4.1$} & \grntxt{$+6.5$} & \grntxt{$+4.7$} \\
\midrule
ResNet50 & \xmark & $16.1 \pm 0.2$ & $9.7 \pm 0.1$ & $38.0 \pm 1.0$ & $40.5 \pm 0.6$ & $66.8 \pm 0.4$ \\
ResNet50 & \cmark & $17.2 \pm 0.1$ & $10.8 \pm 0.4$ & $41.0 \pm 0.7$ & $43.7 \pm 0.3$ & $67.5 \pm 0.1$ \\
& & \grntxt{$+1.1$} & \grntxt{$+1.1$} & \grntxt{$+3.0$} & \grntxt{$+3.2$} & \grntxt{$+0.7$} \\
\midrule
ResNet101 & \xmark & $18.2 \pm 0.4$ & $14.3 \pm 0.1$ & $41.7 \pm 0.7$ & $42.3 \pm 0.1$ & $67.7 \pm 0.5$ \\
ResNet101 & \cmark & $19.9 \pm 0.2$ & $17.6 \pm 0.5$ & $46.3 \pm 0.6$ & $46.3 \pm 0.3$ & $69.5 \pm 0.3$ \\
& & \grntxt{$+1.7$} & \grntxt{$+3.2$} & \grntxt{$+4.6$} & \grntxt{$+4.0$} & \grntxt{$+1.8$} \\
\bottomrule
\end{tabular}
}
\end{subfigure}
\end{table}
\subsection{Bias and Robustness Evaluation}
% Additional to just using \name for training, its special properties and posibilities for adjustment of the data distribution make it a valuable tool for evaluating other model properties and biases.
Beyond its use for training, \schemename's unique properties and controlled data generation capabilities make it a powerful tool for analyzing behavior and biases of black-box models.
We exploit this in two complementary ways.
First, we ask whether \schemename-trained models are more robust on \emph{external} ImageNet robustness benchmarks that are not generated by our pipeline.
Second, we use \schemename's fine-grained control for targeted evaluation of specific dimensions of model bias, such as background reliance and center/size bias.
% Together, these experiments allow us to both \emph{probe} and \emph{improve} robustness along clearly defined axes.
% This combination of standard benchmarks and controlled probes allows us to both quantify robustness improvements and attribute them to changes in particular model behaviors.
\textbf{Robustness on External Distribution Shifts.}
\Cref{tab:robustness-datasets} summarizes accuracy on five widely used ImageNet robustness benchmarks: ImageNet-Hard~\cite{Taesiri2023}, ImageNet-A~\cite{Hendrycks2021}, ImageNet-C~\cite{Hendrycks2019}, ImageNet-R~\cite{Hendrycks2021a}, and ImageNetV2~\cite{Recht2019}.
Across ViTs, Swin Transformers, and ResNets, incorporating \schemename during training generally improves robustness to all considered distribution shifts.
For ViTs, the gains are substantial: for example, ViT-B improves from $15.8\%$ to $31.9\%$ accuracy on ImageNet-A ($+16.0$ p.p.) and from $40.4\%$ to $51.6\%$ on ImageNet-C ($+11.2$ p.p.), with similar improvements for ViT-S and ViT-L.
Swin also benefits consistently, with increases of roughly $2$--$8$ p.p. on most benchmarks, and ResNet sees smaller but steady gains (e.g., up to $+4.6$ points on ImageNet-C).
For DeiT, the picture is more nuanced: DeiT-B and DeiT-L still enjoy robustness improvements, whereas DeiT-S exhibits small decreases on several benchmarks.
Interestingly, however, ViT-S trained with \schemename outperforms the DeiT-S baseline.
This suggests that controlled composition can partially close the robustness gap between lightly and heavily regularized models.
Overall, the consistent improvements on corruption-based, natural and hard examples indicate that the compositional invariances induced by \schemename extend beyond the specific foreground/background manipulations used in its construction.
\begin{figure*}[t]
\centering
\includegraphics[width=.95\textwidth]{img/bg_robustness.pdf}
\caption{Evaluation of background robustness on ImageNet + \schemename, ImageNet9~\cite{Xiao2020} and CounterAnimal~\cite{Wang2024f}.
We plot the in-distribution (top of arrow) and the out-of-distribution (bottom of arrow) accuracy when training with and without \schemename.
We annotate each arrow with its length $\Delta$.
Training with \schemename improves the background robustness of all transformers by mostly boosting the out-of-distribution accuracy.
We assess the robustness of models to shifts in the background distribution.
% \text{Background Robustness} = \frac{\text{Acc}(\name_\text{all})}{\text{Acc}(\name_\text{same})} % \text{Background Robustness} = \frac{\text{Acc}(\name_\text{all})}{\text{Acc}(\name_\text{same})}
% \end{align} % \end{align}
% It represents the relative drop in performance under a background distribution shift. % It represents the relative drop in performance under a background distribution shift.
\Cref{fig:background-robustness} presents the background robustness results for three datasets: ImageNet with \schemename (all backgrounds vs.\ backgrounds of the same class), ImageNet9~\cite{Xiao2020} (random backgrounds vs.\ original backgrounds), and CounterAnimal~\cite{Wang2024f} (counter vs.\ common backgrounds).
The top triangle of each arrow represents the in-distribution backgrounds and the bottom triangle represents the out-of-distribution ones.
We follow ImageNet9 and CounterAnimal and assess the background robustness in terms of the accuracy gap when evaluating a model on images with the normal background distribution compared to out-of-distribution backgrounds (length of each arrow; $\Delta$).
% When trained on ImageNet, smaller models generally exhibit greater robustness to changes in the background distribution than larger models and ResNet is more robust than the tested Transformer models. % When trained on ImageNet, smaller models generally exhibit greater robustness to changes in the background distribution than larger models and ResNet is more robust than the tested Transformer models.
Crucially, \schemename improves the background robustness of all models and across datasets, reducing the background gap by boosting the performance on the out-of-background-distribution samples more than on the in-distribution ones.
We find a similar trend for the Corner-Cases~\cite{Fatima2025} dataset (see supplementary), highlighting the generalization benefits of \schemename for unusual image compositions.
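For concreteness, the arrow length $\Delta$ is simply the difference of the two accuracies; a minimal helper (naming is ours, not from the paper):
\begin{verbatim}
def background_gap(acc_in: float, acc_out: float) -> float:
    # Arrow length Delta in the plot: in-distribution minus
    # out-of-distribution accuracy (percentage points);
    # smaller gaps indicate higher background robustness.
    return acc_in - acc_out
\end{verbatim}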
\begin{figure*}[t]
\centering
\includegraphics[width=.95\textwidth]{img/fg_focus.pdf}
\caption{Evaluation of the foreground focus (\Cref{eq:fg-focus}) using GradCam, GradCam++ and IntegratedGradients (IG) of models trained on ImageNet. Training with \schemename improves the foreground focus of almost all models.}
\label{fig:foreground-focus}
\end{figure*}
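As an illustrative sketch of one plausible reading of this metric (the actual definition is \Cref{eq:fg-focus} in the main text, not reproduced here; the mass-ratio form below is our assumption):
\begin{verbatim}
import numpy as np

def foreground_focus(attr: np.ndarray, fg: np.ndarray) -> float:
    # Share of non-negative attribution mass (e.g. from GradCam)
    # falling on the boolean foreground mask.
    a = np.clip(attr, 0.0, None)
    return float(a[fg].sum() / max(a.sum(), 1e-12))
\end{verbatim}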
We calculate center bias according to \Cref{eq:center-bias}.
Using \schemename significantly reduces models' center bias.}
\label{tab:center-bias}
\begin{subfigure}{.48\columnwidth} \centering
\resizebox{\textwidth}{!}{ \resizebox{.78\columnwidth}{!}{
\begin{tabular}{lccc} \begin{tabular}{lccc}
\toprule \toprule
\multirow{2.5}{*}{Model} & \multicolumn{2}{c}{\makecell{Center Bias [\%] when trained}} & \multirow{2.5}{*}{Delta} \\ \multirow{2.5}{*}{Model} & \multicolumn{2}{c}{\makecell{Center Bias [\%] when trained}} & \multirow{2.5}{*}{Delta} \\
\cmidrule(lr){2-3} \cmidrule(lr){2-3}
& w/o \schemename & w/ \schemename \\ & w/o \schemename & w/ \schemename \\
\midrule \midrule
ViT-S & \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-S_ImageNet_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-S_ImageNet_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-S_ImageNet_v3.pdf} & \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-S_RecombNet_all_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-S_RecombNet_all_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-S_RecombNet_all_v3.pdf} \\
& $25.5\pm0.8$ & $22.0\pm0.3$ & \grntxt{$-3.5$} \\
ViT-B & {\includegraphics[width=.08\columnwidth, valign=c]{img/ViT-B_ImageNet_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-B_ImageNet_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-B_ImageNet_v3.pdf}} & \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-B_RecombNet_all_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-B_RecombNet_all_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-B_RecombNet_all_v3.pdf} \\
& $25.4\pm0.4$ & $19.0\pm0.2$ & \grntxt{$-6.4$} \\
ViT-L & \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-L_ImageNet_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-L_ImageNet_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-L_ImageNet_v3.pdf} & \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-L_RecombNet_all_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-L_RecombNet_all_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ViT-L_RecombNet_all_v3.pdf} \\
& $24.3\pm1.1$ & $11.7\pm0.7$ & \grntxt{$-12.6$} \\
\midrule
Swin-Ti & {\includegraphics[width=.08\columnwidth, valign=c]{img/Swin-Ti_ImageNet_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/Swin-Ti_ImageNet_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/Swin-Ti_ImageNet_v3.pdf}} & {\includegraphics[width=.08\columnwidth, valign=c]{img/Swin-Ti_RecombNet_all_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/Swin-Ti_RecombNet_all_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/Swin-Ti_RecombNet_all_v3.pdf}} \\
& $25.0\pm0.7$ & $16.5\pm0.2$ & \grntxt{$-8.5$} \\
Swin-S & {\includegraphics[width=.08\columnwidth, valign=c]{img/Swin-S_ImageNet_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/Swin-S_ImageNet_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/Swin-S_ImageNet_v3.pdf}} & {\includegraphics[width=.08\columnwidth, valign=c]{img/Swin-S_RecombNet_all_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/Swin-S_RecombNet_all_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/Swin-S_RecombNet_all_v3.pdf}} \\
& $23.2\pm0.1$ & $15.6\pm0.2$ & \grntxt{$-7.6$} \\
\bottomrule
\end{tabular} }
\end{subfigure}
\hfill
\begin{subfigure}{.497\columnwidth}
\resizebox{\textwidth}{!}{
\begin{tabular}{lccc}
\toprule
\multirow{2.5}{*}{Model} & \multicolumn{2}{c}{\makecell{Center Bias [\%] when trained}} & \multirow{2.5}{*}{Delta} \\
\cmidrule(lr){2-3}
& w/o \schemename & w/ \schemename \\
\midrule
DeiT-S & {\includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-S_ImageNet_vNone.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-S_ImageNet_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-S_ImageNet_v3.pdf} } & {\includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-S_fornet_all_linear_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-S_fornet_all_linear_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-S_fornet_all_linear_v3.pdf}} \\
& $20.4 \pm 0.2$ & $21.2 \pm 0.1$ & \gtxt{$+0.8$} \\
DeiT-B & {\includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-B_ImageNet_vNone.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-B_ImageNet_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-B_ImageNet_v3.pdf} } & {\includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-B_fornet_all_cos_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-B_fornet_all_cos_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-B_fornet_all_cos_v3.pdf}} \\
& $19.0 \pm 0.7$ & $19.0 \pm 0.2$ & \gtxt{$\pm0.0$} \\
DeiT-L & { \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-L_ImageNet_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-L_ImageNet_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-L_ImageNet_v3.pdf} } & { \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-L_fornet_all_cos_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-L_fornet_all_cos_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/DeiT-L_fornet_all_cos_v3.pdf} } \\
& $21.2 \pm 0.2$ & $18.0 \pm 0.2$ & \grntxt{$-3.2$} \\
\midrule
ResNet50 & {\includegraphics[width=.08\columnwidth, valign=c]{img/ResNet50_ImageNet_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ResNet50_ImageNet_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ResNet50_ImageNet_v3.pdf}} & {\includegraphics[width=.08\columnwidth, valign=c]{img/ResNet50_RecombNet_all_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ResNet50_RecombNet_all_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ResNet50_RecombNet_all_v3.pdf}} \\
& $26.3\pm0.3$ & $19.7\pm0.3$ & \grntxt{$-6.6$} \\
ResNet101 & {\includegraphics[width=.08\columnwidth, valign=c]{img/ResNet101_ImageNet_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ResNet101_ImageNet_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ResNet101_ImageNet_v3.pdf}} & {\includegraphics[width=.08\columnwidth, valign=c]{img/ResNet101_RecombNet_all_v1.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ResNet101_RecombNet_all_v2.pdf} \includegraphics[width=.08\columnwidth, valign=c]{img/ResNet101_RecombNet_all_v3.pdf}} \\
& $23.0\pm0.3$ & $19.9\pm0.2$ & \grntxt{$-3.1$} \\
\bottomrule
\end{tabular} }
\end{subfigure}
\centering
\includegraphics[width=.5\columnwidth]{img/colorbar_horizontal.pdf}
\end{table}
\textbf{Center Bias.}
@@ -420,14 +474,13 @@ Performance is generally highest in the center and lowest in the four corners.
Interestingly, ImageNet-trained models perform slightly better when the foreground object is on the right side of the image than on the left, despite our use of random flipping with a probability of $0.5$ during training.
% Training on \name reduces the center bias of all models by at least half.
Using \schemename significantly reduces center bias across models, with a more uniform performance especially across the middle row.
% On corner-cases (see supplementary) we find that
% Their accuracy is higher in the center left and right cells than in the center top and bottom ones, which is not the case for ImageNet-trained models.
% This demonstrates that \schemename promotes a more uniform spatial attention distribution, counteracting the center-bias of ImageNet.
Thus, \schemename makes the model recognize objects across a wider spatial distribution, counteracting the center-bias of ImageNet.
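For intuition, the position sweep behind \Cref{tab:center-bias} can be summarized as follows; this is a hedged sketch that assumes a 3x3 placement grid and a center-versus-off-center contrast, while the exact definition is given by \Cref{eq:center-bias} (not shown in this excerpt):

```python
import numpy as np

def center_bias(acc_grid: np.ndarray) -> float:
    """acc_grid: 3x3 accuracies, indexed by where the foreground was pasted.
    Returns the relative advantage of the center cell, in percent."""
    center = acc_grid[1, 1]
    off_center = np.delete(acc_grid.flatten(), 4)  # the 8 surrounding cells
    return float(100.0 * (center - off_center.mean()) / center)

acc = np.array([[0.62, 0.68, 0.63],
                [0.70, 0.80, 0.71],
                [0.58, 0.66, 0.60]])
print(f"{center_bias(acc):.1f}%")  # higher value = stronger center bias
```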
\begin{figure}[t!]
\centering
\includegraphics[width=\columnwidth]{img/size_bias_wide.pdf}
\caption{Evaluation of the size bias of models trained on ImageNet. We plot the accuracy relative to the accuracy when using the default size ($f_\text{size} = 1.0$).}
\label{fig:size-bias}
\end{figure}
@@ -439,87 +492,6 @@ We introduce a size factor $f_\text{size}$ by which we additionally scale the fo
Results are normalized by the accuracy when using $f_\text{size} = 1.0$.
\Cref{fig:size-bias} shows the size bias curves of models trained with and without \schemename.
% When training on \name, the resulting model keeps its good performance on smaller foreground objects, while models trained on ImageNet fall off faster and lower.
Models trained using \schemename perform better, especially with smaller foreground objects.
%, when ImageNet-trained models exhibit a more rapid performance decline. %, when ImageNet-trained models exhibit a more rapid performance decline.
Therefore, \schemename-training improves robustness to variations in object scale, especially for larger models. Therefore, \schemename-training improves robustness to variations in object scale, especially for larger models.
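A compact sketch of the size-bias protocol just described; `recombine` and `evaluate` stand in for the paper's actual recombination and evaluation code and are assumptions of this illustration:

```python
def size_bias_curve(model, pairs, recombine, evaluate,
                    factors=(0.25, 0.5, 0.75, 1.0, 1.5, 2.0)):
    """pairs: (foreground, background) tuples; returns accuracy relative
    to the default object size, as plotted in the size-bias figure."""
    accs = {f: evaluate(model, [recombine(fg, bg, size_factor=f)
                                for fg, bg in pairs])
            for f in factors}
    return {f: acc / accs[1.0] for f, acc in accs.items()}
```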
\subsection{Design Choices of \schemename}
We next analyze key components of \schemename, focusing on three questions: how it compares to simple copy-paste, how background choice affects performance, and how reliably labels are preserved after recomposition.
Additional ablations over variants and hyperparameters are provided in the supplementary material.
\begin{table}[t]
\caption{Comparison of \schemename and simple Copy-Paste methods. We train ViT-S on ImageNet using the same 3-augment data augmentation on top of the copy-paste augmentation.}
\label{tab:copy-paste-comparison}
\centering
\resizebox{.66\columnwidth}{!}{
\begin{tabular}{lcc S[table-format=+2.1,retain-explicit-plus,detect-inline-weight=math,detect-weight=true]}
\toprule
Augmentation & Labels & \makecell{Accuracy [\%]} & {\makecell{Delta \\to Prev.}} \\
\midrule
% Baseline & & $79.1 \pm 0.1$ \\
3-Augment + \textbf{Simple Copy-Paste} & bg & $31.3 \pm 0.6$ & \\
+ mixed labels & fg + bg & $32.0 \pm 0.8$ & +0.7 \\
+ fg labels & fg & $31.6 \pm 0.9$ & -0.4 \\
+ \emph{range} foreground size variation & \gtxt{fg} & $43.0 \pm 1.2$ & \bfseries +11.4 \\
+ infilled backgrounds & \gtxt{fg} & $68.7 \pm 0.2$ & \bfseries +25.7 \\
+ \emph{cos} mixing strategy & \gtxt{fg} & $81.2 \pm 0.1$ & \bfseries +12.5 \\
+ edge smoothing & \gtxt{fg} & $81.3 \pm 0.1$ & +0.1 \\
+ background pruning$=$ \textbf{\schemename} & \gtxt{fg} & $81.4 \pm 0.1$ & +0.1 \\
\bottomrule
\end{tabular}}
\end{table}
\textbf{Comparison to Simple Copy-Paste.}
We compare \schemename to a simple adaptation of the Copy-Paste augmentation inspired by \cite{Ge2023,Ghiasi2021,Shermaine2025} in \Cref{tab:copy-paste-comparison}.
Contrary to semantic segmentation, we do not have foreground masks available.
Thus, we paste the extracted objects from \textbf{\schemename's segmentation stage} onto normal ImageNet images.
% Since such images do not have straightforward classification labels, we test multiple possibilities.
We observe three large jumps in accuracy: (\textbf{1}) from our \emph{range} foreground size variation (+11.4\%), (\textbf{2}) from using our infilled backgrounds instead of images from the dataset (+25.7\%), and (\textbf{3}) from our \emph{cos} mixing strategy with non-augmented images (+12.5\%).
\schemename's changes to the naive copy-paste augmentation are thus imperative for good classification performance.
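For concreteness, the baseline in the first row of \Cref{tab:copy-paste-comparison} amounts to little more than an alpha-paste at a random location; a minimal PIL-based sketch with illustrative names, on top of which \schemename's additions from the table are applied:

```python
import random
from PIL import Image

def simple_copy_paste(fg: Image.Image, bg: Image.Image) -> Image.Image:
    """fg: RGBA cut-out from the segmentation stage; bg: an ImageNet image."""
    out = bg.convert("RGB").copy()
    x = random.randint(0, max(out.width - fg.width, 0))
    y = random.randint(0, max(out.height - fg.height, 0))
    out.paste(fg, (x, y), mask=fg)  # the alpha channel acts as the paste mask
    return out
```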
\begin{figure}[t]
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=\textwidth]{img/strategy.pdf}
\captionof{figure}{We compare Original, Same-class, and All-classes background selection using ViT-Ti and ViT-S backbones on TinyImageNet.
Increasing background diversity consistently improves classification accuracy.
}
\label{fig:background-strategy}
\end{minipage}
\hfill
\begin{minipage}[c]{.49\textwidth}
\centering
\includegraphics[width=\textwidth]{img/mask_expansion.pdf}
\captionof{figure}{
We vary the foreground mask area for TinyImageNet by shrinking or expanding masks relative to the original outline and report accuracy when training on $100\%$ augmented samples.
Performance is stable for expanded masks and degrades rapidly when masks are shrunk.
}
\label{fig:mask-expansion}
\end{minipage}
\end{figure}
\textbf{Background Choice Strategy.}
\Cref{fig:background-strategy} shows the effect of background selection on TinyImageNet accuracy, where we trade off diversity against context plausibility.
% Using the original inpainted background yields the lowest accuracy, indicating limited regularization from contextual cues.
% Sampling backgrounds from the same class provides a modest but consistent improvement, suggesting that mild context variation encourages robustness while preserving semantic plausibility.
The best performance is achieved by sampling backgrounds from all classes, which introduces substantial context shifts yet leads to the strongest accuracy gains for both ViT-Ti and ViT-S.
Thus, aggressive background diversification is more important than context plausibility and acts as an effective form of context-based regularization rather than introducing harmful noise.
\textbf{Label Integrity.}
% We assess the label integrity of \schemename, i.e., whether object labels remain correct after recombination, by verifying that the intended object is accurately extracted.
% To this end, we leverage the object bounding box annotations provided in the ImageNet validation set.
% Specifically, we compute the \emph{box precision}, defined as the fraction of the predicted mask area that lies within the ground-truth bounding box, obtaining a mean value of $91\%$.
% In addition, we measure the \emph{box-to-box IoU}, computed as the IoU between the tight bounding box enclosing the predicted mask and the tight bounding box of the ground-truth annotation, which yields a high $76.1\%$.
% Qualitative examples of the predicted masks and bounding boxes are provided in the supplementary material.
% We additionally test label integrity under systematic mask perturbations by expanding or shrinking the foreground masks before composition.
% Concretely, starting from the original outline, we erode or dilate the mask such that the foreground area changes by some percentage.
% \Cref{fig:mask-expansion} shows that accuracy is relatively stable for expanded masks, but drops off significantly for eroded masks, consistent with cropping away semantically important object parts.
% This experiment suggests, that \schemename is relatively robust to artifacts from including an object's original background in the foreground mask.
% Overall, these results indicate that the segmentation stage of \schemename reliably isolates the target class object, thereby preserving label correctness after recombination.
To quantify whether recombined images still depict the intended class, we evaluate the segmentation stage of \schemename on ImageNet validation boxes.
Our predicted masks achieve a mean box precision of $91.0\%$ (fraction of mask area inside the ground-truth bounding boxes of the ImageNet validation set) and a high box-to-box IoU of $76.1\%$, indicating that they tightly capture the target object.
Qualitative examples of the predicted masks and bounding boxes are provided in the supplementary material.
We further probe robustness to mask imprecision by eroding or dilating masks such that the foreground area changes by a fixed percentage before composition.
As shown in \Cref{fig:mask-expansion}, accuracy remains stable for expansions but drops sharply under erosion, consistent with removing semantically important object parts.
Together, these results suggest that (\textit{i}) \schemename reliably isolates the target objects and preserves label integrity and that (\textit{ii}) \schemename is robust to artifacts from an object's original background and degrades mainly when the foreground no longer contains the full object.
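The erosion/dilation probe can be sketched with standard binary morphology; we assume SciPy here and iterate until the mask area has changed by the requested percentage (the paper's exact implementation may differ):

```python
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion

def perturb_mask(mask: np.ndarray, area_change: float) -> np.ndarray:
    """area_change: +0.2 grows the foreground area by ~20%, -0.2 shrinks it."""
    target = mask.sum() * (1.0 + area_change)
    grow = area_change > 0
    op = binary_dilation if grow else binary_erosion
    out, prev = mask.copy(), -1
    # stop when the target area is reached, the mask vanishes, or it saturates
    while out.sum() > 0 and out.sum() != prev and ((out.sum() < target) == grow):
        prev = out.sum()
        out = op(out)
    return out
```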
View File
@@ -3,69 +3,71 @@
\section{Introduction}
\label{sec:intro}
% \begin{figure}
% \centering
% \includegraphics[width=.5\columnwidth]{img/fig-1.pdf}
% \caption{\schemename factorizes each training image into a foreground object and a background, then recombines them on the fly while controlling background identity, object position, and object scale. Standard, strong augmentations are applied afterwards.}
% \label{fig:fig-1}
% \end{figure}
\begin{table}[t]
\caption{Examples of images generated by \schemename from ImageNet (center cropped).
We successfully segment even multiple objects (\textit{Macaw}) and complex shapes (\textit{Cricket}).}
\label{tab:foraug-examples}
\centering
\resizebox{.9\textwidth}{!}{
\begin{tabular}{ccccc}
\toprule
Class & \makecell{Original \\Image} & \makecell{Extracted \\Foreground} & \makecell{Infilled \\Background} & Recombined Examples \\
\midrule
Macaw & \includegraphics[max width=.1\columnwidth, max height=2cm, valign=c]{img/appendix_examples/n01818515_31507.JPEG} & \includegraphics[max width=.1\columnwidth, max height=2cm, valign=c]{img/appendix_examples/n01818515_31507_v1_fg.PNG} & \includegraphics[max width=.1\columnwidth, max height=2cm, valign=c]{img/appendix_examples/n01818515_31507_v1_bg.JPEG} & \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n01818515_31507_recombined_v12.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n01818515_31507_recombined_v15.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n01818515_31507_recombined_v18.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n01818515_31507_recombined_v3.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n01818515_31507_recombined_v4.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n01818515_31507_recombined_v6.JPEG} \\
% Conch & \includegraphics[max width=.1\columnwidth, max height=2cm, valign=c]{img/appendix_examples/n01943899_20070.JPEG} & \includegraphics[max width=.1\columnwidth, max height=2cm, valign=c]{img/appendix_examples/n01943899_20070_fg.PNG} & \includegraphics[max width=.1\columnwidth, max height=2cm, valign=c]{img/appendix_examples/n01943899_20070_bg.JPEG} & \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n01943899_20070_recombined_v9.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n01943899_20070_recombined_v10.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n01943899_20070_recombined_v11.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n01943899_20070_recombined_v12.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n01943899_20070_recombined_v17.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n01943899_20070_recombined_v8.JPEG} \\
Cricket & \includegraphics[max width=.1\columnwidth, max height=2cm, valign=c]{img/appendix_examples/n02229544_6170.JPEG} & \includegraphics[max width=.1\columnwidth, max height=2cm, valign=c]{img/appendix_examples/n02229544_6170_fg.PNG} & \includegraphics[max width=.1\columnwidth, max height=2cm, valign=c]{img/appendix_examples/n02229544_6170_bg.JPEG} & \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n02229544_6170_recombined_v0.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n02229544_6170_recombined_v10.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n02229544_6170_recombined_v15.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n02229544_6170_recombined_v16.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n02229544_6170_recombined_v2.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n02229544_6170_recombined_v6.JPEG} \\
Laptop & \includegraphics[max width=.1\columnwidth, max height=2cm, valign=c]{img/appendix_examples/n03642806_3615.JPEG} & \includegraphics[max width=.1\columnwidth, max height=2cm, valign=c]{img/appendix_examples/n03642806_3615_fg.PNG} & \includegraphics[max width=.1\columnwidth, max height=2cm, valign=c]{img/appendix_examples/n03642806_3615_bg.JPEG} & \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n03642806_3615_recombined_v0.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n03642806_3615_recombined_v1.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n03642806_3615_recombined_v11.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n03642806_3615_recombined_v14.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n03642806_3615_recombined_v15.JPEG} \includegraphics[width=.1\columnwidth, valign=c]{img/foraug_examples/n03642806_3615_recombined_v2.JPEG} \\
\bottomrule
\end{tabular}
}
\end{table}
Large-scale image classification is a central driver of modern computer vision: it benchmarks progress in computer vision~\cite{Khan2022,Rangel2024}, powers model pretraining~\cite{Dosovitskiy2021,Liu2021,Touvron2021b}, and yields representations that transfer broadly and underpin applications like medical diagnosis~\cite{Sanderson2022,Vezakis2024}, autonomous driving~\cite{Wang2023a}, and object recognition~\cite{Carion2020,He2017,Girshick2014}.
However, classification supervision is weak in an important sense: the label does not specify \emph{how} the class-object should appear.
In ImageNet~\cite{Deng2009}, for example, objects often occur at characteristic positions and scales and co-occur with correlated scene context~\cite{Fatima2025,Barbu2019}.
% In datasets such as ImageNet, objects often occur at characteristic positions and scales and co-occur with correlated scene context~\cite{Fatima2025,Barbu2019}.
As a result, models rely on shortcuts such as background cues, center bias, or size bias that boost in-distribution accuracy but hurt robustness and transfer~\cite{Geirhos2020,Fatima2025,Barbu2019}.
Here, data augmentation is the default defense.
Standard transformations (crop/flip/color jitter) and stronger policies such as MixUp~\cite{Zhang2018a}/CutMix~\cite{Yun2019} and automated augmentation search~\cite{Cubuk2019,Cubuk2020} expand appearance diversity~\cite{Shorten2019,Xu2023d}. % , yet they largely preserve the original \emph{composition} of each image~\cite{Shorten2019,Xu2023d}.
However, their ability to teach spatial and compositional invariances is limited.
This constraint matters especially for Vision Transformers (ViTs)~\cite{Dosovitskiy2021}: with weaker built-in spatial inductive biases than Convolutional Neural Networks (CNNs), ViTs must learn key equivariances (e.g., translation and scale robustness) primarily from data.
Copy-paste style augmentations~\cite{Ghiasi2021,Kang2022} alter composition more aggressively by overlaying segmented objects onto other images.
These are typically designed for detection or instance segmentation and rely on dense human annotations available for these tasks or use unconstrained dataset images as backgrounds.
As a result, they do not offer fine-grained control of object position and scale, and they do not explicitly enforce that the pasted background is semantically neutral, creating ambiguous labels for classification.
To encode compositional invariances directly in the training data, we propose \emph{Foreground-Background Augmentation} (\schemename), a controlled composition augmentation that \emph{explicitly factorizes each image into foreground and background, then recombines them for label-preserving, interpretable distribution shifts}.
Concretely, \schemename uses off-the-shelf segmentation and inpainting models to (i) extract a foreground object and synthesize a class-consistent, semantically neutral background, and (ii) paste the foreground onto diverse neutral backgrounds while controlling its position and scale (see \Cref{tab:foraug-examples}).
Unlike prior copy-paste methods that simply overlay objects onto arbitrary scenes~\cite{Ghiasi2021,Kang2022}, \schemename first removes and neutralizes the original background, then samples from well-defined distributions of backgrounds, object positions, and object sizes.
This explicit factorization preserves a clean label for the recombined image while providing direct control over compositions, enabling us to break spurious correlations while still fitting seamlessly into modern strong augmentation pipelines. % (see \Cref{fig:fig-1}).
% Throughout, we apply \schemename on top of strong augmentation pipelines (RandAugment, Mixup, CutMix), so any gains are complementary to these widely used techniques.
% As it is important that any gains are complementary to strong augmentation pipelines (RandAugment, MixUp, CutMix), we apply \schemename on top of these widely used techniques.
To ensure that all gains are complementary to strong augmentation pipelines (RandAugment, MixUp, CutMix), we apply \schemename on top of these widely used techniques.
Empirically, \schemename yields consistent accuracy gains across architectures, improving ImageNet top-1 accuracy by up to 6 p.p. and fine-grained downstream accuracy by up to 7.3 p.p., and even improving transfer when ImageNet accuracy is matched.
Beyond accuracy, training with \schemename substantially improves robustness on standard distribution-shift benchmarks, where we observe gains of roughly $2$--$19$ p.p. across ViT, Swin, and ResNet architectures.
Finally, the same control knobs enable \schemename to become a targeted diagnostic tool of shortcut reliance and model robustness.
We quantify background reliance via controlled background swaps, and probe center and size biases through systematic position and scale sweeps, showing that training with \schemename reduces model biases.
\medskip
\noindent
\textbf{Contributions}
\begin{itemize}[topsep=0pt]
\item \textbf{Controlled composition augmentation for classification.}
We introduce \schemename, a foreground-background factorization and recombination scheme for image classification that creates label-preserving training samples with explicit control over background identity, object position, and object scale.
\item \textbf{Accuracy and transfer gains.}
Training with \schemename, in addition to standard strong augmentation pipelines, improves ImageNet top-1 accuracy by up to 6 p.p., boosts fine-grained downstream accuracy by up to 7.3 p.p., and increases accuracy on shifted distributions by up to $19$ p.p.
\item \textbf{Controlled bias diagnostics and mitigation.}
Using the same controls during evaluation, we measure background reliance, foreground focus, and position/scale biases through targeted distribution shifts.
\schemename systematically reduces shortcut behaviors and model biases.
\end{itemize}
View File
@@ -3,20 +3,11 @@
%\begin{figure*}[ht!]
% \centering
% \includegraphics[width=.9\textwidth]{img/fig-2.pdf}
% \caption{Overview of \name. The data creation consists of two stages: (1, offline) Segmentation, where we segment the foreground objects from the background and fill in the background. (2, online) Recombination, where we combine the foreground objects with different backgrounds to create new samples. After recombination, we apply strong, commonly used augmentation policies.}
% \label{fig:method}
%\end{figure*}
\begin{figure*}[t]
\centering
\includegraphics[width=\textwidth]{img/fig-2.pdf}
\caption{Overview of \schemename.
We segment the foreground object and inpaint the removed region to obtain a neutral background (Offline, \Cref{sec:segmentation}).
We then paste the foreground onto a sampled background while controlling position and scale, and afterwards apply strong traditional augmentations (Online, \Cref{sec:recombination}).}
\label{fig:method}
\end{figure*}
\section{\schemename}
\label{sec:method}
% \begin{itemize}
@@ -40,98 +31,83 @@
% We introduce \schemename, a data augmentation scheme designed to enhance Transformer training by explicitly separating and recombining foreground objects and backgrounds.
% \schemename enhances transformer training by explicitly encoding spatial invariances that these need to learn explicitly in the data.
% \schemename involves two stages: Segmentation and Recombination, both visualized in \Cref{fig:method}.
We introduce \schemename, a data augmentation scheme designed to enhance training by embedding spatial invariances, which Transformers would otherwise need to learn implicitly, directly into the training data.
% It operates by explicitly segmenting and recombining foreground objects and backgrounds.
\schemename comprises two distinct stages: Segmentation and Recombination. Both are illustrated in \Cref{fig:method}.
\subsection{Segmentation}
\label{sec:segmentation}
The offline segmentation stage produces reusable assets for recombination.
% The segmentation stage isolates the foreground objects and their corresponding backgrounds.
For each labeled training image, we create a pair $(\mathrm{fg},\mathrm{bg})$ consisting of (\textit{i}) a foreground cut-out $\mathrm{fg}$ with an alpha mask and (\textit{ii}) an inpainted background image $\mathrm{bg}$ where the foreground region has been removed.
This stage is computed once offline and the results are stored for the recombination stage.
\textbf{Generate candidate foreground masks.}
We obtain foreground candidates with Grounded SAM~\cite{Ren2024} (Grounding DINO~\cite{Liu2024a} + SAM~\cite{Kirillov2023}).
We leverage the dataset label by prompting the model with ``\code{a <class name>, a type of <object category>}''.
Here \code{<object category>} is the immediate WordNet hypernym of the class (e.g., ``sorrel'' $\rightarrow$ ``horse''), which improves robustness when the class name is rare or overly specific.
This can be the case with prompts like ``sorrel'' or ``guenon'', where the more general name ``horse'' or ``monkey'' is more common.
To increase recall, we generate up to $N=3$ masks per image by iteratively moving one level up the hypernym chain (e.g., ``sorrel'' $\rightarrow$ ``horse'' $\rightarrow$ ``equine'' $\dots$).
We merge near-duplicate masks with pairwise IoU $\ge 0.9$, yielding a small set of $n_i<N$ candidate masks per image $i$.
% We iteratively extract up to $n$ foreground masks for each dataset-image, using different more and more general prompts based on the more general synsets of WordNet (e.g. ``a sorrel, a type of horse'', ``a horse, a type of equine'', ...).
We select the best mask per image (according to \Cref{eq:filtering-score}) in a later filtering step, described below.
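The prompt schedule can be sketched with NLTK's WordNet interface (an assumption of this illustration; the paper only specifies the prompt template and the hypernym walk, and the synset sense in the docstring is illustrative):

```python
from nltk.corpus import wordnet as wn  # requires nltk.download("wordnet")

def hypernym_prompts(synset_name: str, class_name: str, n: int = 3):
    """E.g. a horse-sense synset of "sorrel" yields
    ["a sorrel, a type of horse", "a horse, a type of equine", ...]."""
    syn, name, prompts = wn.synset(synset_name), class_name, []
    for _ in range(n):
        hypers = syn.hypernyms()
        if not hypers:
            break
        syn = hypers[0]
        category = syn.lemma_names()[0].replace("_", " ")
        prompts.append(f"a {name}, a type of {category}")
        name = category  # walk one level up the WordNet tree per step
    return prompts
```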
\textbf{Create neutral backgrounds via object removal.}
Given a candidate mask, we remove the masked region and inpaint it using an object-removal model (LaMa~\cite{Suvorov2022} or Attentive Eraser~\cite{Sun2025}).
This produces a visually plausible, ``neutral'' candidate background that can be paired with many foregrounds.
For an image $i$ we now have $n_i$ foreground objects, extracted from $i$ by cutting out the masked region, each paired with a background where the same mask has been infilled.
% $c$ is the correct foreground class, $\mathrm{fg}$, and $\mathrm{bg}$ are the foreground and background and $\operatorname{size}(\cdot)$ is the size in number of pixels.
\textbf{Select a high-quality pair.}
Different masks can trade off including the full object versus leaking class cues into the background.
We therefore score each candidate pair using an ensemble $E$ of six pretrained classifiers (ViT/ResNet/Swin) trained on the original dataset.
Intuitively, we prefer (\textit{i}) foregrounds that strongly support the ground-truth class and (\textit{ii}) backgrounds that do \emph{not} support the ground-truth class, while (\textit{iii}) discouraging overly large foreground regions.
For each model $m \in E$, we compute the score of the ground-truth class $c$, $\P[m(\mathrm{fg})=c]$ on the foreground (with solid-gray background) and $\P[m(\mathrm{bg})=c]$ on the background, and combine them with a size prior $\operatorname{size}(\cdot)$ (pixel count):
\begin{align} \begin{split} \label{eq:filtering-score}
\text{score}(\mathrm{fg}, \mathrm{bg}, c) &= \log \left( \sum_{m \in E} \frac{\P[m(\mathrm{fg}) = c]}{\abs{E}} \right)
+ \log \left( 1 - \sum_{m \in E} \frac{\P[m(\mathrm{bg}) = c]}{\abs{E}} \right) \\
& + \lambda \log \left( 1 - \abs{\frac{\operatorname{size}(\mathrm{fg})}{\operatorname{size}(\mathrm{bg})} - \eps} \right).
\end{split} \end{align}
% We set $\lambda = 2$ and $\eps = 0.1$ via a small hyperparameter search on a manually annotated subset.
We run a hyperparameter search on a manually annotated subset of foreground/background variants to find the factors in \Cref{eq:filtering-score}: $\lambda = 2$ and $\eps = 0.1$.
For each image, we keep the candidate mask with the highest score.
% The \textit{optimal foreground size} of $10\%$ of the full image balances the smallest possible foreground size that encompasses all the respective class information in the image with still conveying the foreground information after pasting it onto another background.
% This filtering step ensures we segment all the relevant foreground objects.
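For reference, \Cref{eq:filtering-score} translates into a few lines of Python; the sketch below assumes the per-model class probabilities are already computed, and the container in the usage comment is illustrative:

```python
import numpy as np


def filtering_score(p_fg, p_bg, size_fg, size_bg, lam=2.0, eps=0.1):
    """Sketch of the filtering score.

    p_fg / p_bg: ground-truth-class probabilities of the |E| ensemble
    members on the foreground (on solid gray) and on the inpainted
    background; size_fg / size_bg: pixel counts.
    """
    fg_term = np.log(np.mean(p_fg))        # foreground should support class c
    bg_term = np.log(1.0 - np.mean(p_bg))  # background should not
    size_term = lam * np.log(1.0 - abs(size_fg / size_bg - eps))
    return fg_term + bg_term + size_term

# per image, keep the highest-scoring candidate (illustrative container):
# best = max(candidates, key=lambda c: filtering_score(c.p_fg, c.p_bg, c.fg_px, c.bg_px))
```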
\textbf{Filter low-quality backgrounds.}
Finally, we discard backgrounds that are heavily ($\geq 80\%$) inpainted, as they tend to look synthetic and provide little useful diversity (see the supplementary material).
This step filters out $10\%$ of backgrounds.
Although segmentation is the main computational overhead, it is performed once offline and reused across all training runs.
On NVIDIA H100 GPUs, the segmentation stage runs at a rate of $5338.6 \frac{\text{img}}{\text{GPU} \times \text{h}}$ when inpainting with LaMa.
For ImageNet, this amounts to roughly $240$ GPU-hours, i.e., just under $30$ hours on a single node.
At roughly twice the cost of a single ViT-B training run ($\approx 14$ hours), this is a modest one-time investment that is amortized over every subsequent experiment the dataset is used in.
For details, see the supplementary material.
The output of the segmentation stage is a collection of foreground cut-outs (with transparency) and a pool of diverse, neutral backgrounds, which we use in the online recombination stage.
For ImageNet, we provide pre-computed segmentation output\footnote{\code{URL will go here}}.
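The quoted cost is easy to sanity-check; the back-of-the-envelope script below assumes the 1,281,167-image ImageNet-1k training split and 8 GPUs per node (both our assumptions, not stated above):

```python
# sanity check of the segmentation-stage cost
images = 1_281_167
rate = 5_338.6                   # img / (GPU * h), LaMa inpainting on H100
gpu_hours = images / rate        # ~240 GPU-hours
print(gpu_hours, gpu_hours / 8)  # ~240 -> ~30 h on one 8-GPU node
```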
\subsection{Recombination}
\label{sec:recombination}
The recombination stage is performed online during training.
In each epoch, it generates a recombined training sample for each foreground by (\textit{i}) choosing a background, (\textit{ii}) choosing a target foreground size, (\textit{iii}) sampling a placement, and (\textit{iv}) pasting the foreground using its alpha mask.
This exposes the model to controlled changes in context and spatial layout that are largely absent from standard augmentation.
\textbf{Background sampling.}
For each foreground object, we draw a background using one of three increasingly challenging strategies:
(\textit{i}) \textit{Original}: use the object's own inpainted background (no context shift);
(\textit{ii}) \textit{Same-class}: sample a background from the pool of backgrounds belonging to the same class (a slight but plausible context shift);
(\textit{iii}) \textit{All-classes}: sample from the pool of all inpainted backgrounds (a large context shift).
These strategies trade off context diversity against semantic plausibility.
We ensure that each foreground is used exactly once per epoch; backgrounds may repeat.
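A minimal sketch of the three strategies; `item` and `pools` are hypothetical containers over the segmentation-stage outputs:

```python
import random


def sample_background(item, pools, strategy):
    if strategy == "original":      # (i) no context shift
        return item.own_background
    if strategy == "same_class":    # (ii) slight but plausible context shift
        return random.choice(pools.by_class[item.label])
    if strategy == "all_classes":   # (iii) large context shift
        return random.choice(pools.all_backgrounds)
    raise ValueError(f"unknown strategy: {strategy}")
```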
\textbf{Foreground scaling.}
The selected foreground is resized based on its relative size within its original image and the relative size of the original foreground in the selected background image.
Let $r_{\text{fg}}$ denote the relative foreground area in the source image of the foreground, and $r_{\text{bg}}$ the relative area of the \emph{original} foreground (before inpainting) in the chosen background image.
We compute lower/upper size limits $(s_l, s_u)$ from these two ratios using one of two variants:
(\textit{i}) \emph{mean} sets both limits to the mean of $r_{\text{fg}}$ and $r_{\text{bg}}$, while
(\textit{ii}) \emph{range} sets them to the min/max to preserve a wider scale range.
Then, we sample the final scale uniformly from a $\pm 30\%$ interval around these limits, $s \sim \mathcal{U}\left[(1 - 0.3)\, s_l,\, (1 + 0.3)\, s_u\right]$, and resize the foreground to this scale while keeping its aspect ratio.
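A minimal sketch of the scale sampling under both variants, following the definitions of $r_{\text{fg}}$, $r_{\text{bg}}$, and $(s_l, s_u)$ above:

```python
import random


def sample_scale(r_fg, r_bg, variant="mean", jitter=0.3):
    """Target relative foreground area (sketch).

    r_fg / r_bg: relative foreground areas in the foreground's and the
    background's source images.
    """
    if variant == "mean":
        s_l = s_u = 0.5 * (r_fg + r_bg)
    else:  # "range"
        s_l, s_u = min(r_fg, r_bg), max(r_fg, r_bg)
    # s ~ U[(1 - 0.3) s_l, (1 + 0.3) s_u]
    return random.uniform((1 - jitter) * s_l, (1 + jitter) * s_u)
```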
\textbf{Placement and boundary smoothing.}
We paste the resized foreground at a uniformly random location within the background.
To reduce cut-and-paste artifacts, we slightly soften the alpha-mask boundary by applying a Gaussian blur with ${\sigma \in [\frac{\sigma_{\text{max}}}{10}, \sigma_{\text{max}}]}$, following the range used in modern augmentation~\cite{Touvron2022}.
% For example recombined images, see \Cref{tab:foraug-examples}.
% \textbf{Interaction with standard augmentation.}
% We support two augmentation orders:
% (\textit{i}) apply the full augmentation pipeline after recombination; or
% (\textit{ii}) apply crop+resize to the background first (to keep the full foreground visible), then recombine, then apply the remaining augmentations.
% The former matches standard training exactly; the latter isolates composition changes from random cropping.
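A Pillow-based sketch of the paste step; the default $\sigma_{\text{max}} = 2$ is our assumption (the text only fixes the relative range), and the snippet assumes the resized foreground fits inside the background:

```python
import random

from PIL import Image, ImageFilter


def paste_foreground(fg_rgba, bg, sigma_max=2.0):
    # soften the alpha boundary: sigma ~ U[sigma_max / 10, sigma_max]
    sigma = random.uniform(sigma_max / 10, sigma_max)
    alpha = fg_rgba.getchannel("A").filter(ImageFilter.GaussianBlur(sigma))
    fg = fg_rgba.copy()
    fg.putalpha(alpha)
    # uniform random placement within the background
    x = random.randint(0, bg.width - fg.width)
    y = random.randint(0, bg.height - fg.height)
    out = bg.convert("RGB")
    out.paste(fg, (x, y), mask=fg)  # RGBA mask -> alpha-composited paste
    return out
```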
\textbf{Mixing with original images.}
We optionally mix recombined samples with unmodified dataset images.
A mixing ratio $p$ acts as the probability of drawing the unmodified original image; otherwise, we take its foreground and apply \schemename, so each foreground is still seen once per epoch.
We consider constant $p$ as well as linear/cosine schedules that increase $p$ over training.
Finally, we apply standard data augmentation techniques on the resulting images.
The online recombination is parallelized on the CPU and barely affects training time: we measure only a $\approx 1\%$ increase in average step time (see the supplementary material).
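A sketch of the mixing logic; the exact schedule shapes and the `recombine` helper are illustrative assumptions:

```python
import math
import random


def mixing_ratio(epoch, total_epochs, p_max, schedule="constant"):
    """Probability of drawing the unmodified original image this epoch."""
    t = epoch / max(total_epochs - 1, 1)
    if schedule == "constant":
        return p_max
    if schedule == "linear":
        return p_max * t
    return p_max * 0.5 * (1.0 - math.cos(math.pi * t))  # "cosine"


def draw_sample(item, p):
    # original image with probability p; otherwise recombine its foreground,
    # so every foreground still appears exactly once per epoch
    return item.original if random.random() < p else recombine(item)  # hypothetical helper
```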

View File

@@ -1,120 +0,0 @@
% !TeX root = ../main.tex
%\begin{figure*}[ht!]
% \centering
% \includegraphics[width=.9\textwidth]{img/fig-2.pdf}
% \caption{Overview of \name. The data creation consists of two stages: (1, offline) Segmentation, where we segment the foreground objects from the background and fill in the background. (2, online) Recombination, where we combine the foreground objects with different backgrounds to create new samples. After recombination, we apply strong, commonly used augmentation policies.}
% \label{fig:method}
%\end{figure*}
\begin{figure*}[t]
\centering
\includegraphics[width=\textwidth]{img/fig-2.pdf}
\caption{Overview of \schemename. The data creation consists of two stages: Segmentation (offline, \Cref{sec:segmentation}), where we segment the foreground objects from the background and fill in the background. Recombination (online, \Cref{sec:recombination}), where we combine the foreground objects with different backgrounds to create new samples. After recombination, we apply strong, commonly used augmentation policies.}
\label{fig:method}
\end{figure*}
\section{\schemename (Method)}
\label{sec:method}
% \begin{itemize}
% \item[1.] Segment ImageNet
% \item Detect and Cutout Foreground
% \item Multiple foreground possibilities
% \item Foreground mask merging
% \item Background infills
% \item Foreground/Background Filtering
% \item [2.] Recombination
% \item Which foreground \& Background
% \item Background pruning
% \item size
% \item positioning
% \item Border smoothing
% \item Dealing with other data augmentations/transformations
% \end{itemize}
% We propose a novel dataset, called \name, that improves image classification performance by explicitly separating and recombining foreground objects and plain backgrounds.
% \name consists of two stages: Segmentation and recombination. Both are visualized in \Cref{fig:method}.
% We introduce \schemename, a data augmentation scheme designed to enhance Transformer training by explicitly separating and recombining foreground objects and backgrounds.
% \schemename enhances transformer training by explicitly encoding spatial invariances that these need to learn explicitly in the data.
% \schemename involves two stages: Segmentation and Recombination, both visualized in \Cref{fig:method}.
We introduce \schemename, a data augmentation scheme designed to enhance Transformer training by embedding spatial invariances--which Transformers would otherwise need to learn implicitly--directly into the training data.
% It operates by explicitly segmenting and recombining foreground objects and backgrounds.
\schemename comprises two distinct stages: Segmentation and Recombination. Both are illustrated in \Cref{fig:method}.
\subsection{Segmentation}
\label{sec:segmentation}
The segmentation stage isolates the foreground objects and their corresponding backgrounds.
% We then fill in the background in a visually plausible way~\cite{Sun2025} using a pretrained object-removal model.
We then fill the background using a pretrained object-removal model, producing visually plausible~\cite{Sun2025}, neutral scenes ready for recombination.
This stage is computed once offline and the results are stored for the recombination stage.
First, foreground objects are detected and segmented from their backgrounds using a prompt-based segmentation model to exploit the classification dataset's labels.
We use the state-of-the-art Grounded SAM~\cite{Ren2024}, which is based on Grounding DINO~\cite{Liu2024a} and SAM~\cite{Kirillov2023}.
The prompt we use is ``\code{a <class name>, a type of <object category>}'', where \code{<class name>} is the specific name of the object's class as defined by the dataset and \code{<object category>} is the broader category of the object.
The \code{<object category>} guides the segmentation model towards the correct object in case the \code{<class name>} alone is too specific.
This can be the case with prompts like ``sorrel'' or ``guenon'', where the more general name ``horse'' or ``monkey'' is more helpful.
We derive the \code{<object category>} from the WordNet hierarchy, using the immediate hypernym.
% We iteratively extract up to $n$ foreground masks for each dataset-image, using different more and more general prompts based on the more general synsets of WordNet (e.g. ``a sorrel, a type of horse'', ``a horse, a type of equine'', ...).
We iteratively extract $n$ foreground masks for each dataset-image, creating prompts by going one hypernym up the WordNet-tree each step (e.g. ``a sorrel, a type of horse'', ``a horse, a type of equine'', ...).
Masks that are very similar, with a pairwise IoU of at least $0.9$, are merged.
The output is a set of masks delineating the foreground objects and the backgrounds.
We select the best mask per image (according to \Cref{eq:filtering-score}) in a later filtering step, described below.
First, an inpainting model that is specifically optimized to remove objects from images, such as LaMa~\cite{Suvorov2022} or Attentive Eraser~\cite{Sun2025}, is used to inpaint the foreground regions in the backgrounds.
Then, to ensure the quality of the foregrounds and the neutral background images, we select a foreground/background pair (for each dataset-image) from the $\leq n$ variants we have extracted and infilled in the previous steps.
Using an ensemble $E$ of six ViT, ResNet, and Swin Transformer models pretrained on the original dataset, we select the foreground/background pair that maximizes foreground performance while minimizing the performance on the background and size of the foreground.
For each model $m \in E$, we predict the score of the ground truth class $c$ on the foreground $\mathrm{fg}$ and background $\mathrm{bg}$ and weigh these with the size $\operatorname{size}(\cdot)$ in number of pixels according to:
% $c$ is the correct foreground class, $\mathrm{fg}$, and $\mathrm{bg}$ are the foreground and background and $\operatorname{size}(\cdot)$ is the size in number of pixels.
\begin{align} \begin{split} \label{eq:filtering-score}
\text{score}(\mathrm{fg}, \mathrm{bg}, c) &= \log \left( \sum_{m \in E} \frac{\P[m(\mathrm{fg}) = c]}{\abs{E}} \right)
+ \log \left( 1 - \sum_{m \in E} \frac{\P[m(\mathrm{bg}) = c]}{\abs E} \right) \\
& + \lambda \log \left( 1 - \abs{\frac{\operatorname{size}(\mathrm{fg})}{\operatorname{size}(\mathrm{bg})} - \eps} \right).
\end{split} \end{align}
% We use $E$ is the ensemble of models and $m$ is a pretrained model, $c$ is the correct foreground class, $\mathrm{fg}$, and $\mathrm{bg}$ are the foreground and background and $\operatorname{size}(\cdot)$ is the size in number of pixels.
We run a hyperparameter search using a manually annotated subset of foreground/background variants to find the factors in \Cref{eq:filtering-score}: $\lambda = 2$ and $\eps = 0.1$.
% The \textit{optimal foreground size} of $10\%$ of the full image balances the smallest possible foreground size that encompasses all the respective class information in the image with still conveying the foreground information after pasting it onto another background.
% This filtering step ensures we segment all the relevant foreground objects.
Finally, we filter out backgrounds that are largely infilled, as these tend to be overly synthetic and do not carry much information (see the supplementary material).
% We ablate this choice in \Cref{sec:ablation}.
% While the computational cost for the segmentation stage is significant, this is a one-time calculation whose results can be reused in subsequent experiments (see the supplementary material for details).
Although the segmentation stage is computational overhead, it is a one-time cost with results that can be reused across experiments (see the supplementary material for details).
In summary, we factorize the dataset into a set of foreground objects with a transparent background and a set of diverse backgrounds per class.
The next step is to recombine these, before applying other common data augmentation operations during training.
\subsection{Recombination}
\label{sec:recombination}
The recombination stage, performed online during training, combines the foreground objects with different backgrounds to create new training samples.
For each object, we follow the pipeline of: pick an appropriate background, resize the foreground to a fitting size, and place it in the background image.
Through this step, we expose the model to variations beyond the image compositions of the dataset.
For each foreground object, we sample a background using one of the following strategies:
(1) the original image background, (2) the set of backgrounds from the same class, or (3) the set of all possible backgrounds.
These sets trade off the amount of information the model can learn from the background against the diversity of new images created.
In each epoch, each foreground object is seen exactly once, but a background may appear multiple times.
The selected foreground is resized based on its relative size within its original image and the relative size of the original foreground in the selected background image.
The final size is randomly selected from a 30\% range around upper and lower limits ($s_u$ and $s_l$), based on the original sizes.
% \begin{align}
% s \sim \mathcal U \left[ (1 - 0.3) s_l, (1 + 0.3) s_u \right].
% \end{align}
To balance the size of the foreground and that of the background's original foreground, the upper and lower limits $s_u$ and $s_l$ are set to the mean or range of both sizes, depending on the foreground size strategy: \emph{mean} or \emph{range}.
The resized foreground is then placed at a random position within the background image.
To more seamlessly integrate the foreground, we apply a Gaussian blur with ${\sigma \in [\frac{\sigma_{\text{max}}}{10}, \sigma_{\text{max}}]}$, inspired by the standard range for the Gaussian blur operation in \cite{Touvron2022}, to the foreground's alpha-mask.
We can apply standard data augmentation techniques in two modes:
Either we apply all augmentations to the recombined image, or we apply the cropping and resizing to the background only and then apply the other augmentations after recombination.
% While for the second mode, the foreground object will always be fully visible, the first mode uses the data augmentations in the same way they would be used for the baseline dataset.
% The second mode ensures the foreground object remains fully visible, while the first mode mirrors standard data augmentation practices.
The first mode mirrors standard augmentation practice, whereas the second one ensures the foreground object remains fully visible.
We experiment with a constant mixing ratio, or a linear or cosine annealing schedule that increases the amount of images from the original dataset over time.
The mixing ratio acts as a probability of selecting an image from the original dataset;
otherwise, an image with the same foreground is recombined using \schemename, ensuring each object is seen once per epoch.
% Thus, we still ensure each foreground is seen once per epoch.
The recombination stage is designed to be parallelized on the CPU during training and thus does not impact training time (see supplementary material for details).

View File

@@ -3,44 +3,44 @@
\section{Related Work}
\label{sec:related_work}
\textbf{Data Augmentation for Image Classification.}
Data augmentation is a crucial technique for improving model performance and generalization.
Traditional augmentation strategies rely on simple geometric or color-space transformations like cropping, flipping, rotation, blurring, color jittering, or random erasing~\cite{Zhong2020} to increase training data diversity without changing the semantic meaning.
With the advent of ViTs~\cite{Dosovitskiy2021}, new data augmentation operations like PatchDropout~\cite{Liu2022d} have been proposed.
Other transformations like MixUp~\cite{Zhang2018a}, CutMix~\cite{Yun2019}, or random cropping and patching~\cite{Takahashi2018} combine multiple input images.
These simple transformations are usually bundled to form more complex augmentation policies like AutoAugment~\cite{Cubuk2019} and RandAugment~\cite{Cubuk2020}, or 3-Augment~\cite{Touvron2022}. %, which is optimized to train a ViT.
For a general overview of data augmentation for image classification, we refer to Shorten et al.~\cite{Shorten2019} and Xu et al.~\cite{Xu2023d}.
We advance these general augmentations by introducing \schemename to explicitly separate objects and backgrounds for image classification, allowing us to move beyond image compositions from the dataset.
Thus, \schemename unlocks performance improvements and bias reduction not possible with traditional data augmentation.
% \schemename is used additionally to traditional augmentation techniques to improve performance and reduce biases.
\textbf{Copy-Paste Augmentation.}
The copy-paste augmentation~\cite{Ghiasi2021}, which is used only for object detection~\cite{Shermaine2025,Ghiasi2021} and instance segmentation~\cite{Werman2022,Ling2022}, involves copying segmented objects from one image and pasting them onto another.
While typically human-annotated segmentation masks are used to extract the foreground objects, other foreground sources have been explored, like 3D models~\cite{Hinterstoisser2019} and pretrained object-detection models for use on objects on white backgrounds~\cite{Dwibedi2017} or synthetic images~\cite{Ge2023}.
Kang et al.~\cite{Kang2022} apply copy-paste as an alternative to CutMix in image classification, but they do not shift the size or position of the foregrounds and use dataset images (with object) as backgrounds.
% Unlike these methods, \schemename focuses on image classification.
% While these methods paste objects onto another image (with a different foreground) or on available or rendered background images of the target scene, we extract foreground objects and fill in the resulting holes in the background in a semantically neutral way.
Unlike prior copy-paste methods that overlay objects, \schemename extracts foregrounds and replaces their backgrounds with semantically neutral fills, thereby preserving label integrity while enabling controlled and diverse recombination.
% This way, we are preserving label integrity while also having diverse, neutral backgrounds available for recombination, enabling a controlled and diverse manipulation of image composition.
\textbf{Generative data augmentation.}
Recent work uses generative models to synthesize additional training images, e.g., via GANs or diffusion models driven by text prompts or attribute labels~\cite{Lu2022,Trabucco2024,Islam2024}.
Concurrently to our work, AGA~\cite{Rahat2025} combines LLMs, diffusion models, and segmentation to generate fully synthetic backgrounds from text prompts, onto which real foregrounds are pasted.
These synthetic images are appended to the original training set.
While AGA focuses on increasing diversity via prompt-driven background synthesis, \schemename uses generative models differently:
we apply inpainting only to locally neutralize the original object region, yielding semi-synthetic backgrounds that preserve the global layout, style, and characteristics of real dataset images.
% AGA's focus on synthetic backgrounds is likely to produce a shifted, or even collapsed, background image distribution~\cite{Zverev2025,Shumailov2024,Adamkiewicz2026}.
Fully synthetic, prompt-generated backgrounds are likely to change the effective background distribution, especially when prompts or generators are biased~\cite{Zverev2025,Shumailov2024,Adamkiewicz2026}.
We then recombine real foregrounds with these neutralized, dataset-consistent backgrounds online, under explicit control of object position and scale.
Thus, \schemename acts as a dynamic, large-scale augmentation method, while AGA statically expands small-scale training sets with synthetic data.
\textbf{Model robustness evaluation.}
Evaluating model robustness to various image variations is critical for understanding and improving model generalization.
Datasets like ImageNet-A~\cite{Hendrycks2021}, ImageNet-C~\cite{Hendrycks2019}, and ImageNet-P~\cite{Hendrycks2019} introduce common corruptions and perturbations.
ImageNet-E~\cite{Li2023e} evaluates model robustness against a collection of distribution shifts.
Other datasets, such as ImageNet-D~\cite{Zhang2024f} and ImageNet-R~\cite{Hendrycks2021a}, focus on varying background, texture, and material, but rely on synthetic data.
Stylized ImageNet~\cite{Geirhos2019} investigates the impact of texture changes.
ImageNet-9~\cite{Xiao2020} explores background variations using segmented images for a 9-class subset of ImageNet with artificial backgrounds.
In contrast to these existing datasets, which are used only for evaluation, \schemename provides fine-grained control over foreground object placement, size, and background selection, enabling a precise and comprehensive analysis of specific model biases within the context of a large-scale, real-world image distribution.
As \schemename also provides controllable training data generation, it goes beyond simply measuring robustness to actively improving it through training.

File diff suppressed because it is too large Load Diff

21
supplementary.brf Normal file
View File

@@ -0,0 +1,21 @@
\backcite {Bates1955}{{1}{A}{figure.caption.1}}
\backcite {Jonhson1995}{{1}{A}{figure.caption.1}}
\backcite {You2020}{{2}{1}{table.caption.4}}
\backcite {Touvron2022}{{2}{1}{table.caption.4}}
\backcite {Touvron2021b}{{2}{1}{table.caption.4}}
\backcite {Yun2019}{{2}{1}{table.caption.4}}
\backcite {Zhong2017}{{2}{1}{table.caption.4}}
\backcite {Cubuk2019}{{2}{1}{table.caption.4}}
\backcite {Zhang2018a}{{2}{1}{table.caption.4}}
\backcite {Yun2019}{{2}{1}{table.caption.4}}
\backcite {Nauen2025}{{3}{C}{table.caption.5}}
\backcite {Touvron2022}{{3}{C}{table.caption.5}}
\backcite {Touvron2021b}{{3}{C}{table.caption.5}}
\backcite {Nauen2025}{{3}{C}{table.caption.5}}
\backcite {Paszke2019}{{3}{C}{table.caption.5}}
\backcite {Wightman2019}{{3}{C}{table.caption.5}}
\backcite {Deng2009}{{3}{D}{table.caption.7}}
\backcite {Suvorov2021}{{5}{E}{table.caption.8}}
\backcite {Sun2024}{{5}{E}{table.caption.8}}
\backcite {Ren2024}{{7}{2}{figure.caption.10}}
\backcite {Ren2024}{{7}{F}{figure.caption.10}}

Binary file not shown.

View File

@@ -1,112 +1,67 @@
% CVPR 2026 Paper Template; see https://github.com/cvpr-org/author-kit
\documentclass[10pt,twocolumn,letterpaper]{article}

%%%%%%%%% PAPER TYPE - PLEASE UPDATE FOR FINAL VERSION
% \usepackage{cvpr} % To produce the CAMERA-READY version
\usepackage[review]{cvpr} % To produce the REVIEW version
% \usepackage[pagenumbers]{cvpr} % To force page numbers, e.g. for an arXiv version

% Import additional packages in the preamble file, before hyperref
\usepackage[pagebackref,breaklinks,colorlinks,allcolors=cvprblue]{hyperref}
\input{packages}

% It is strongly recommended to use hyperref, especially for the review version.
% hyperref with option pagebackref eases the reviewers' job.
% Please disable hyperref *only* if you encounter grave issues,
% e.g. with the file validation for the camera-ready version.
%
% If you comment hyperref and then uncomment it, you should delete *.aux before re-running LaTeX.
% (Or just hit 'q' on the first LaTeX run, let it finish, and you should be clear).
\definecolor{cvprblue}{rgb}{0.21,0.49,0.74}

%%%%%%%%% PAPER ID - PLEASE UPDATE
\def\paperID{Supplementary} % *** Enter the Paper ID here
\def\confName{CVPR}
\def\confYear{2026}

%%%%%%%%% TITLE - PLEASE UPDATE
\newcommand{\name}{\textit{ForNet}\xspace}
\newcommand{\schemename}{\textit{ForAug}\xspace}
\title{\schemename: Mitigating Biases and Improving Vision Transformer Training by Recombining Foregrounds and Backgrounds \\ -- Supplementary Material --}

%%%%%%%%% AUTHORS - PLEASE UPDATE
\author{First Author\\
Institution1\\
Institution1 address\\
{\tt\small firstauthor@i1.org}
% For a paper whose authors are all at the same institution,
% omit the following lines up until the closing ``}''.
% Additional authors and addresses can be added with ``\and'',
% just like the second author.
% To save space, use either the email address or home page, not both
\and
Second Author\\
Institution2\\
First line of institution2 address\\
{\tt\small secondauthor@i2.org}
}

\begin{document}
\maketitle
\begin{abstract}
This is the supplementary material for the paper: \schemename: Mitigating Biases and Improving Vision Transformer Training by Recombining Foregrounds and Backgrounds.
\end{abstract}
\appendix
\input{sec/appendix}
{
\small
\bibliographystyle{ieeenat_fullname}
\bibliography{../JabRef/main_bib}
}
% WARNING: do not forget to delete the supplementary pages from your submission
% \input{sec/X_suppl}
\end{document}

133
supplementary_old.tex Normal file
View File

@@ -0,0 +1,133 @@
\documentclass[letterpaper]{article} % DO NOT CHANGE THIS
\usepackage[submission]{aaai2026} % DO NOT CHANGE THIS
\usepackage{times} % DO NOT CHANGE THIS
\usepackage{helvet} % DO NOT CHANGE THIS
\usepackage{courier} % DO NOT CHANGE THIS
\usepackage[hyphens]{url} % DO NOT CHANGE THIS
\usepackage{graphicx} % DO NOT CHANGE THIS
\urlstyle{rm} % DO NOT CHANGE THIS
\def\UrlFont{\rm} % DO NOT CHANGE THIS
\usepackage{natbib} % DO NOT CHANGE THIS AND DO NOT ADD ANY OPTIONS TO IT
\usepackage{caption} % DO NOT CHANGE THIS AND DO NOT ADD ANY OPTIONS TO IT
\frenchspacing % DO NOT CHANGE THIS
\setlength{\pdfpagewidth}{8.5in} % DO NOT CHANGE THIS
\setlength{\pdfpageheight}{11in} % DO NOT CHANGE THIS
%
% These are recommended to typeset algorithms but not required. See the subsubsection on algorithms. Remove them if you don't have algorithms in your paper.
\usepackage{algorithm}
\usepackage{algorithmic}
%
% These are recommended to typeset listings but not required. See the subsubsection on listing. Remove this block if you don't have listings in your paper.
\usepackage{newfloat}
\usepackage{listings}
\DeclareCaptionStyle{ruled}{labelfont=normalfont,labelsep=colon,strut=off} % DO NOT CHANGE THIS
\lstset{%
basicstyle={\footnotesize\ttfamily},% footnotesize acceptable for monospace
numbers=left,numberstyle=\footnotesize,xleftmargin=2em,% show line numbers, remove this entire line if you don't want the numbers.
aboveskip=0pt,belowskip=0pt,%
showstringspaces=false,tabsize=2,breaklines=true}
\floatstyle{ruled}
\newfloat{listing}{tb}{lst}{}
\floatname{listing}{Listing}
\input{packages}
%
% Keep the \pdfinfo as shown here. There's no need
% for you to add the /Title and /Author tags.
\pdfinfo{
/TemplateVersion (2026.1)
}
% DISALLOWED PACKAGES
% \usepackage{authblk} -- This package is specifically forbidden
% \usepackage{balance} -- This package is specifically forbidden
% \usepackage{color (if used in text)
% \usepackage{CJK} -- This package is specifically forbidden
% \usepackage{float} -- This package is specifically forbidden
% \usepackage{flushend} -- This package is specifically forbidden
% \usepackage{fontenc} -- This package is specifically forbidden
% \usepackage{fullpage} -- This package is specifically forbidden
% \usepackage{geometry} -- This package is specifically forbidden
% \usepackage{grffile} -- This package is specifically forbidden
% \usepackage{hyperref} -- This package is specifically forbidden
% \usepackage{navigator} -- This package is specifically forbidden
% (or any other package that embeds links such as navigator or hyperref)
% \indentfirst} -- This package is specifically forbidden
% \layout} -- This package is specifically forbidden
% \multicol} -- This package is specifically forbidden
% \nameref} -- This package is specifically forbidden
% \usepackage{savetrees} -- This package is specifically forbidden
% \usepackage{setspace} -- This package is specifically forbidden
% \usepackage{stfloats} -- This package is specifically forbidden
% \usepackage{tabu} -- This package is specifically forbidden
% \usepackage{titlesec} -- This package is specifically forbidden
% \usepackage{tocbibind} -- This package is specifically forbidden
% \usepackage{ulem} -- This package is specifically forbidden
% \usepackage{wrapfig} -- This package is specifically forbidden
% DISALLOWED COMMANDS
% \nocopyright -- Your paper will not be published if you use this command
% \addtolength -- This command may not be used
% \balance -- This command may not be used
% \baselinestretch -- Your paper will not be published if you use this command
% \clearpage -- No page breaks of any kind may be used for the final version of your paper
% \columnsep -- This command may not be used
% \newpage -- No page breaks of any kind may be used for the final version of your paper
% \pagebreak -- No page breaks of any kind may be used for the final version of your paper
% \pagestyle -- This command may not be used
% \tiny -- This is not an acceptable font size.
% \vspace{- -- No negative value may be used in proximity of a caption, figure, table, section, subsection, subsubsection, or reference
% \vskip{- -- No negative value may be used to alter spacing above or below a caption, figure, table, section, subsection, subsubsection, or reference
\setcounter{secnumdepth}{0} %May be changed to 1 or 2 if section numbers are desired.
% The file aaai2026.sty is the style file for AAAI Press
% proceedings, working notes, and technical reports.
%
% Title
\newcommand{\name}{\textit{ForNet}\xspace}
\newcommand{\schemename}{\textit{ForAug}\xspace}
% Names: RecombiNet, RecombNet, ReMix, ReMixNet, FoReMix/ForeMix
%%%%%%%%% TITLE - PLEASE UPDATE
\title{\schemename: Recombining Foregrounds and Backgrounds to Improve Vision Transformer Training with Bias Mitigation\\-- Supplementary Material --}
%%%%%%%%% AUTHORS - PLEASE UPDATE
\author {
Tobias Christian Nauen\textsuperscript{\rm 1, \rm 2},
Brian Moser\textsuperscript{\rm 2},
Federico Raue\textsuperscript{\rm 2},
Stanislav Frolov\textsuperscript{\rm 2},
Andreas Dengel\textsuperscript{\rm 1, \rm 2}
}
\affiliations {
\textsuperscript{\rm 1}RPTU Kaiserslautern-Landau, Kaiserslautern, Germany \\
\textsuperscript{\rm 2}German Research Center for Artificial Intelligence (DFKI), Kaiserslautern, Germany \\
{\tt\small first\_second.last@dfki.de / first.last@dfki.de}
}
\begin{document}
\onecolumn
\maketitle
% \input{sec/abstract}
% \input{sec/intro}
% \input{sec/related_work}
% \input{sec/method}
% \input{sec/experiments}
% % \input{sec/future_work}
% \input{sec/conclusion}
% \input{sec/acks}
\begin{abstract}
This is the supplementary material for the paper: \schemename: Recombining Foregrounds and Backgrounds to Improve Vision Transformer Training with Bias Mitigation
\end{abstract}
% \newpage
\appendix
\input{sec/appendix}
\bibliography{../JabRef/main_bib}
\end{document}